Dataset schema (per-column dtype and value ranges):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 458k |
| file_name | stringlengths | 4 | 119 |
| file_path | stringlengths | 14 | 227 |
| content | stringlengths | 24 | 9.96M |
| size | int64 | 24 | 9.96M |
| language | stringclasses | 1 value | |
| extension | stringclasses | 14 values | |
| total_lines | int64 | 1 | 219k |
| avg_line_length | float64 | 2.52 | 4.63M |
| max_line_length | int64 | 5 | 9.91M |
| alphanum_fraction | float64 | 0 | 1 |
| repo_name | stringlengths | 7 | 101 |
| repo_stars | int64 | 100 | 139k |
| repo_forks | int64 | 0 | 26.4k |
| repo_open_issues | int64 | 0 | 2.27k |
| repo_license | stringclasses | 12 values | |
| repo_extraction_date | stringclasses | 433 values | |
id: 14,100 | file_name: _junit_xml.py | file_path: ansible_ansible/lib/ansible/utils/_junit_xml.py | content:
"""
Dataclasses for creating JUnit XML files.
See: https://github.com/junit-team/junit5/blob/main/platform-tests/src/test/resources/jenkins-junit.xsd
"""
from __future__ import annotations
import abc
import dataclasses
import datetime
import decimal
from xml.dom import minidom
# noinspection PyPep8Naming
from xml.etree import ElementTree as ET
@dataclasses.dataclass
class TestResult(metaclass=abc.ABCMeta):
"""Base class for the result of a test case."""
output: str | None = None
message: str | None = None
type: str | None = None
def __post_init__(self):
if self.type is None:
self.type = self.tag
@property
@abc.abstractmethod
def tag(self) -> str:
"""Tag name for the XML element created by this result type."""
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
message=self.message,
type=self.type,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element(self.tag, self.get_attributes())
element.text = self.output
return element
@dataclasses.dataclass
class TestFailure(TestResult):
"""Failure info for a test case."""
@property
def tag(self) -> str:
"""Tag name for the XML element created by this result type."""
return 'failure'
@dataclasses.dataclass
class TestError(TestResult):
"""Error info for a test case."""
@property
def tag(self) -> str:
"""Tag name for the XML element created by this result type."""
return 'error'
@dataclasses.dataclass
class TestCase:
"""An individual test case."""
name: str
assertions: int | None = None
classname: str | None = None
status: str | None = None
time: decimal.Decimal | None = None
errors: list[TestError] = dataclasses.field(default_factory=list)
failures: list[TestFailure] = dataclasses.field(default_factory=list)
skipped: str | None = None
system_out: str | None = None
system_err: str | None = None
is_disabled: bool = False
@property
def is_failure(self) -> bool:
"""True if the test case contains failure info."""
return bool(self.failures)
@property
def is_error(self) -> bool:
"""True if the test case contains error info."""
return bool(self.errors)
@property
def is_skipped(self) -> bool:
"""True if the test case was skipped."""
return bool(self.skipped)
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
assertions=self.assertions,
classname=self.classname,
name=self.name,
status=self.status,
time=self.time,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testcase', self.get_attributes())
if self.skipped:
ET.SubElement(element, 'skipped').text = self.skipped
element.extend([error.get_xml_element() for error in self.errors])
element.extend([failure.get_xml_element() for failure in self.failures])
if self.system_out:
ET.SubElement(element, 'system-out').text = self.system_out
if self.system_err:
ET.SubElement(element, 'system-err').text = self.system_err
return element
@dataclasses.dataclass
class TestSuite:
"""A collection of test cases."""
name: str
hostname: str | None = None
id: str | None = None
package: str | None = None
timestamp: datetime.datetime | None = None
properties: dict[str, str] = dataclasses.field(default_factory=dict)
cases: list[TestCase] = dataclasses.field(default_factory=list)
system_out: str | None = None
system_err: str | None = None
def __post_init__(self):
if self.timestamp and self.timestamp.tzinfo != datetime.timezone.utc:
raise ValueError(f'timestamp.tzinfo must be {datetime.timezone.utc!r}')
@property
def disabled(self) -> int:
"""The number of disabled test cases."""
return sum(case.is_disabled for case in self.cases)
@property
def errors(self) -> int:
"""The number of test cases containing error info."""
return sum(case.is_error for case in self.cases)
@property
def failures(self) -> int:
"""The number of test cases containing failure info."""
return sum(case.is_failure for case in self.cases)
@property
def skipped(self) -> int:
"""The number of test cases containing skipped info."""
return sum(case.is_skipped for case in self.cases)
@property
def tests(self) -> int:
"""The number of test cases."""
return len(self.cases)
@property
def time(self) -> decimal.Decimal:
"""The total time from all test cases."""
return decimal.Decimal(sum(case.time for case in self.cases if case.time))
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
disabled=self.disabled,
errors=self.errors,
failures=self.failures,
hostname=self.hostname,
id=self.id,
name=self.name,
package=self.package,
skipped=self.skipped,
tests=self.tests,
time=self.time,
timestamp=self.timestamp.replace(tzinfo=None).isoformat(timespec='seconds') if self.timestamp else None,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testsuite', self.get_attributes())
if self.properties:
ET.SubElement(element, 'properties').extend([ET.Element('property', dict(name=name, value=value)) for name, value in self.properties.items()])
element.extend([test_case.get_xml_element() for test_case in self.cases])
if self.system_out:
ET.SubElement(element, 'system-out').text = self.system_out
if self.system_err:
ET.SubElement(element, 'system-err').text = self.system_err
return element
@dataclasses.dataclass
class TestSuites:
"""A collection of test suites."""
name: str | None = None
suites: list[TestSuite] = dataclasses.field(default_factory=list)
@property
def disabled(self) -> int:
"""The number of disabled test cases."""
return sum(suite.disabled for suite in self.suites)
@property
def errors(self) -> int:
"""The number of test cases containing error info."""
return sum(suite.errors for suite in self.suites)
@property
def failures(self) -> int:
"""The number of test cases containing failure info."""
return sum(suite.failures for suite in self.suites)
@property
def tests(self) -> int:
"""The number of test cases."""
return sum(suite.tests for suite in self.suites)
@property
def time(self) -> decimal.Decimal:
"""The total time from all test cases."""
return decimal.Decimal(sum(suite.time for suite in self.suites))
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
disabled=self.disabled,
errors=self.errors,
failures=self.failures,
name=self.name,
tests=self.tests,
time=self.time,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testsuites', self.get_attributes())
element.extend([suite.get_xml_element() for suite in self.suites])
return element
def to_pretty_xml(self) -> str:
"""Return a pretty formatted XML string representing this instance."""
return _pretty_xml(self.get_xml_element())
def _attributes(**kwargs) -> dict[str, str]:
"""Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted."""
return {key: str(value) for key, value in kwargs.items() if value is not None}
def _pretty_xml(element: ET.Element) -> str:
"""Return a pretty formatted XML string representing the given element."""
return minidom.parseString(ET.tostring(element, encoding='unicode')).toprettyxml()
size: 8,671 | language: Python | extension: .py | total_lines: 211 | avg_line_length: 33.691943 | max_line_length: 154 | alphanum_fraction: 0.65078 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
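
A minimal usage sketch for the `_junit_xml.py` dataclasses above; the suite and case names are invented for illustration:

```python
import datetime
import decimal

# Build one suite with a passing and a failing case, then render JUnit XML.
suite = TestSuite(
    name='example-suite',
    timestamp=datetime.datetime.now(datetime.timezone.utc),  # must be UTC per __post_init__
)
suite.cases.append(TestCase(name='test_ok', time=decimal.Decimal('0.01')))
suite.cases.append(TestCase(
    name='test_broken',
    failures=[TestFailure(message='expected 1, got 2', output='assertion output')],
))
print(TestSuites(name='example', suites=[suite]).to_pretty_xml())
```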
id: 14,101 | file_name: unicode.py | file_path: ansible_ansible/lib/ansible/utils/unicode.py | content:
# (c) 2012-2014, Toshio Kuratomi <a.badger@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.module_utils.common.text.converters import to_text
__all__ = ('unicode_wrap',)
def unicode_wrap(func, *args, **kwargs):
"""If a function returns a string, force it to be a text string.
Use with partial to ensure that filter plugins will return text values.
"""
return to_text(func(*args, **kwargs), nonstring='passthru')
size: 1,100 | language: Python | extension: .py | total_lines: 24 | avg_line_length: 43.875 | max_line_length: 75 | alphanum_fraction: 0.754911 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
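
As the docstring suggests, `unicode_wrap` pairs with `functools.partial` so a wrapped callable always returns text; a small sketch (`os.getcwd` is just an example callable):

```python
from functools import partial
import os

wrapped_getcwd = partial(unicode_wrap, os.getcwd)
result = wrapped_getcwd()
print(type(result))  # <class 'str'> -- bytes results are converted, non-strings pass through
```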
id: 14,102 | file_name: lock.py | file_path: ansible_ansible/lib/ansible/utils/lock.py | content:
# Copyright (c) 2020 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from functools import wraps
def lock_decorator(attr='missing_lock_attr', lock=None):
"""This decorator is a generic implementation that allows you
to either use a pre-defined instance attribute as the location
of the lock, or to explicitly pass a lock object.
This code was implemented with ``threading.Lock`` in mind, but
may work with other locks, assuming that they function as
context managers.
When using ``attr``, the assumption is that the first argument to
the wrapped method is ``self`` or ``cls``.
Examples:
@lock_decorator(attr='_callback_lock')
def send_callback(...):
@lock_decorator(lock=threading.Lock())
def some_method(...):
"""
def outer(func):
@wraps(func)
def inner(*args, **kwargs):
# Python2 doesn't have ``nonlocal``
# assign the actual lock to ``_lock``
if lock is None:
_lock = getattr(args[0], attr)
else:
_lock = lock
with _lock:
return func(*args, **kwargs)
return inner
return outer
size: 1,306 | language: Python | extension: .py | total_lines: 32 | avg_line_length: 32.90625 | max_line_length: 92 | alphanum_fraction: 0.63083 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
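
A sketch of both documented usages of `lock_decorator`; the `Notifier` class and `shared_lock` name are invented for illustration:

```python
import threading

class Notifier:
    def __init__(self):
        self._callback_lock = threading.Lock()

    @lock_decorator(attr='_callback_lock')
    def send_callback(self, event):
        return event  # body runs with self._callback_lock held

shared_lock = threading.Lock()

@lock_decorator(lock=shared_lock)
def double(x):
    return x * 2  # body runs with shared_lock held
```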
id: 14,103 | file_name: jsonrpc.py | file_path: ansible_ansible/lib/ansible/utils/jsonrpc.py | content:
# (c) 2017, Peter Sprygada <psprygad@redhat.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import json
import pickle
import traceback
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.six import binary_type, text_type
from ansible.utils.display import Display
display = Display()
class JsonRpcServer(object):
_objects = set() # type: set[object]
def handle_request(self, request):
request = json.loads(to_text(request, errors='surrogate_then_replace'))
method = request.get('method')
if method.startswith('rpc.') or method.startswith('_'):
error = self.invalid_request()
return json.dumps(error)
args, kwargs = request.get('params')
setattr(self, '_identifier', request.get('id'))
rpc_method = None
for obj in self._objects:
rpc_method = getattr(obj, method, None)
if rpc_method:
break
if not rpc_method:
error = self.method_not_found()
response = json.dumps(error)
else:
try:
result = rpc_method(*args, **kwargs)
except ConnectionError as exc:
display.vvv(traceback.format_exc())
try:
error = self.error(code=exc.code, message=to_text(exc))
except AttributeError:
error = self.internal_error(data=to_text(exc))
response = json.dumps(error)
except Exception as exc:
display.vvv(traceback.format_exc())
error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
response = json.dumps(error)
else:
if isinstance(result, dict) and 'jsonrpc' in result:
response = result
else:
response = self.response(result)
try:
response = json.dumps(response)
except Exception as exc:
display.vvv(traceback.format_exc())
error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
response = json.dumps(error)
delattr(self, '_identifier')
return response
def register(self, obj):
self._objects.add(obj)
def header(self):
return {'jsonrpc': '2.0', 'id': self._identifier}
def response(self, result=None):
response = self.header()
if isinstance(result, binary_type):
result = to_text(result)
if not isinstance(result, text_type):
response["result_type"] = "pickle"
result = to_text(pickle.dumps(result), errors='surrogateescape')
response['result'] = result
return response
def error(self, code, message, data=None):
response = self.header()
error = {'code': code, 'message': message}
if data:
error['data'] = data
response['error'] = error
return response
# json-rpc standard errors (-32768 .. -32000)
def parse_error(self, data=None):
return self.error(-32700, 'Parse error', data)
def method_not_found(self, data=None):
return self.error(-32601, 'Method not found', data)
def invalid_request(self, data=None):
return self.error(-32600, 'Invalid request', data)
def invalid_params(self, data=None):
return self.error(-32602, 'Invalid params', data)
def internal_error(self, data=None):
return self.error(-32603, 'Internal error', data)
size: 3,806 | language: Python | extension: .py | total_lines: 88 | avg_line_length: 32.795455 | max_line_length: 99 | alphanum_fraction: 0.602599 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
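
A hedged sketch of the request/response cycle; the `Echo` class is hypothetical, and `params` must be a two-item `(args, kwargs)` pair since `handle_request` unpacks it:

```python
import json

class Echo:
    def say(self, message):
        return message

server = JsonRpcServer()
server.register(Echo())
request = json.dumps({'jsonrpc': '2.0', 'id': 1, 'method': 'say', 'params': (['hello'], {})})
print(server.handle_request(request))  # {"jsonrpc": "2.0", "id": 1, "result": "hello"}
```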
id: 14,104 | file_name: path.py | file_path: ansible_ansible/lib/ansible/utils/path.py | content:
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import shutil
from errno import EEXIST
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True, basedir=None):
"""
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
:arg follow: A boolean to indicate whether symlinks should be resolved or not
:arg basedir: A byte string, text string, PathLike object, or `None`
representing where a relative path should be resolved from.
`None` will be substituted for the current working directory.
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
:rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
"""
b_basedir = to_bytes(basedir, errors='surrogate_or_strict', nonstring='passthru')
if b_basedir is None:
b_basedir = to_bytes(os.getcwd(), errors='surrogate_or_strict')
elif os.path.isfile(b_basedir):
b_basedir = os.path.dirname(b_basedir)
b_final_path = os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))
if not os.path.isabs(b_final_path):
b_final_path = os.path.join(b_basedir, b_final_path)
if follow:
b_final_path = os.path.realpath(b_final_path)
return to_text(os.path.normpath(b_final_path), errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
"""
A *potentially insecure* way to ensure the existence of a directory chain. The "safe" in this function's name
refers only to its ability to ignore `EEXIST` in the case of multiple callers operating on the same part of
the directory chain. This function is not safe to use under world-writable locations when the first level of the
path to be created contains a predictable component. Always create a randomly-named element first if there is any
chance the parent directory might be world-writable (eg, /tmp) to prevent symlink hijacking and potential
disclosure or modification of sensitive file contents.
:arg path: A byte or text string representing a directory chain to be created
:kwarg mode: If given, the mode to set the directory to
:raises AnsibleError: If the directory cannot be created and does not already exist.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
"""
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
def basedir(source):
""" returns directory for inventory or playbook """
source = to_bytes(source, errors='surrogate_or_strict')
dname = None
if os.path.isdir(source):
dname = source
elif source in [None, '', '.']:
dname = os.getcwd()
elif os.path.isfile(source):
dname = os.path.dirname(source)
if dname:
# don't follow symlinks for basedir, enables source re-use
dname = os.path.abspath(dname)
return to_text(dname, errors='surrogate_or_strict')
def cleanup_tmp_file(path, warn=False):
"""
Removes a temporary file or directory. Optionally displays a warning if the
file or directory cannot be removed.
:arg path: Path to file or directory to be removed
:kwarg warn: Whether or not to display a warning when the file or directory
cannot be removed
"""
try:
if os.path.exists(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.unlink(path)
except Exception as e:
if warn:
# Importing here to avoid circular import
from ansible.utils.display import Display
display = Display()
display.display(u'Unable to remove temporary file {0}'.format(to_text(e)))
except Exception:
pass
def is_subpath(child, parent, real=False):
"""
Compares paths to check if one is contained in the other
:arg child: Path to test
:arg parent: Path to test against
"""
test = False
abs_child = unfrackpath(child, follow=False)
abs_parent = unfrackpath(parent, follow=False)
if real:
abs_child = os.path.realpath(abs_child)
abs_parent = os.path.realpath(abs_parent)
c = abs_child.split(os.path.sep)
p = abs_parent.split(os.path.sep)
try:
test = c[:len(p)] == p
except IndexError:
# child is shorter than parent so cannot be subpath
pass
return test
size: 6,051 | language: Python | extension: .py | total_lines: 131 | avg_line_length: 39.473282 | max_line_length: 125 | alphanum_fraction: 0.685462 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
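
Illustrative calls for `path.py`, assuming a typical POSIX environment (actual results depend on the local filesystem):

```python
# unfrackpath expands env vars/tilde and normalizes relative traversal:
print(unfrackpath('$HOME/../../var/mail'))         # e.g. '/var/spool/mail'

# is_subpath compares component-wise after unfracking both arguments:
print(is_subpath('/etc/ssh/sshd_config', '/etc'))  # True
print(is_subpath('/etc', '/etc/ssh'))              # False
```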
id: 14,105 | file_name: fqcn.py | file_path: ansible_ansible/lib/ansible/utils/fqcn.py | content:
# (c) 2020, Felix Fontein <felix@fontein.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
def add_internal_fqcns(names):
"""
Given a sequence of action/module names, returns a list containing these names
plus, for every name that is not already an FQCN, variants with the prefixes
`ansible.builtin.` and `ansible.legacy.` added.
"""
result = []
for name in names:
result.append(name)
if '.' not in name:
result.append('ansible.builtin.%s' % name)
result.append('ansible.legacy.%s' % name)
return result
size: 1,215 | language: Python | extension: .py | total_lines: 30 | avg_line_length: 37.033333 | max_line_length: 74 | alphanum_fraction: 0.722739 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
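
Expected expansion for one bare and one already-qualified name, per the function above:

```python
print(add_internal_fqcns(['copy', 'community.general.ufw']))
# ['copy', 'ansible.builtin.copy', 'ansible.legacy.copy', 'community.general.ufw']
```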
id: 14,106 | file_name: encrypt.py | file_path: ansible_ansible/lib/ansible/utils/encrypt.py | content:
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import random
import string
from collections import namedtuple
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils.six import text_type
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.utils.display import Display
PASSLIB_E = None
PASSLIB_AVAILABLE = False
try:
import passlib
import passlib.hash
from passlib.utils.handlers import HasRawSalt, PrefixWrapper
try:
from passlib.utils.binary import bcrypt64
except ImportError:
from passlib.utils import bcrypt64
PASSLIB_AVAILABLE = True
except Exception as e:
PASSLIB_E = e
display = Display()
__all__ = ['do_encrypt']
DEFAULT_PASSWORD_LENGTH = 20
def random_password(length=DEFAULT_PASSWORD_LENGTH, chars=C.DEFAULT_PASSWORD_CHARS, seed=None):
"""Return a random password string of length containing only chars
:kwarg length: The number of characters in the new password. Defaults to 20.
:kwarg chars: The characters to choose from. The default is all ascii
letters, ascii digits, and these symbols ``.,:-_``
"""
if not isinstance(chars, text_type):
raise AnsibleAssertionError('%s (%s) is not a text_type' % (chars, type(chars)))
if seed is None:
random_generator = random.SystemRandom()
else:
random_generator = random.Random(seed)
return u''.join(random_generator.choice(chars) for dummy in range(length))
def random_salt(length=8):
"""Return a text string suitable for use as a salt for the hash functions we use to encrypt passwords.
"""
# Note passlib salt values must be pure ascii so we can't let the user
# configure this
salt_chars = string.ascii_letters + string.digits + u'./'
return random_password(length=length, chars=salt_chars)
class BaseHash(object):
algo = namedtuple('algo', ['crypt_id', 'salt_size', 'implicit_rounds', 'salt_exact', 'implicit_ident'])
algorithms = {
'md5_crypt': algo(crypt_id='1', salt_size=8, implicit_rounds=None, salt_exact=False, implicit_ident=None),
'bcrypt': algo(crypt_id='2b', salt_size=22, implicit_rounds=12, salt_exact=True, implicit_ident='2b'),
'sha256_crypt': algo(crypt_id='5', salt_size=16, implicit_rounds=535000, salt_exact=False, implicit_ident=None),
'sha512_crypt': algo(crypt_id='6', salt_size=16, implicit_rounds=656000, salt_exact=False, implicit_ident=None),
}
def __init__(self, algorithm):
self.algorithm = algorithm
class PasslibHash(BaseHash):
def __init__(self, algorithm):
super(PasslibHash, self).__init__(algorithm)
if not PASSLIB_AVAILABLE:
raise AnsibleError("passlib must be installed and usable to hash with '%s'" % algorithm, orig_exc=PASSLIB_E)
display.vv("Using passlib to hash input with '%s'" % algorithm)
try:
self.crypt_algo = getattr(passlib.hash, algorithm)
except Exception:
raise AnsibleError("passlib does not support '%s' algorithm" % algorithm)
def hash(self, secret, salt=None, salt_size=None, rounds=None, ident=None):
salt = self._clean_salt(salt)
rounds = self._clean_rounds(rounds)
ident = self._clean_ident(ident)
return self._hash(secret, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
def _clean_ident(self, ident):
ret = None
if not ident:
if self.algorithm in self.algorithms:
return self.algorithms.get(self.algorithm).implicit_ident
return ret
if self.algorithm == 'bcrypt':
return ident
return ret
def _clean_salt(self, salt):
if not salt:
return None
elif issubclass(self.crypt_algo.wrapped if isinstance(self.crypt_algo, PrefixWrapper) else self.crypt_algo, HasRawSalt):
ret = to_bytes(salt, encoding='ascii', errors='strict')
else:
ret = to_text(salt, encoding='ascii', errors='strict')
# Ensure the salt has the correct padding
if self.algorithm == 'bcrypt':
ret = bcrypt64.repair_unused(ret)
return ret
def _clean_rounds(self, rounds):
algo_data = self.algorithms.get(self.algorithm)
if rounds:
return rounds
elif algo_data and algo_data.implicit_rounds:
# The default rounds used by passlib depend on the passlib version.
# For consistency ensure that passlib behaves the same as crypt in case no rounds were specified.
# Thus use the crypt defaults.
return algo_data.implicit_rounds
else:
return None
def _hash(self, secret, salt, salt_size, rounds, ident):
# Not every hash algorithm supports every parameter.
# Thus create the settings dict only with set parameters.
settings = {}
if salt:
settings['salt'] = salt
if salt_size:
settings['salt_size'] = salt_size
if rounds:
settings['rounds'] = rounds
if ident:
settings['ident'] = ident
# starting with passlib 1.7 'using' and 'hash' should be used instead of 'encrypt'
try:
if hasattr(self.crypt_algo, 'hash'):
result = self.crypt_algo.using(**settings).hash(secret)
elif hasattr(self.crypt_algo, 'encrypt'):
result = self.crypt_algo.encrypt(secret, **settings)
else:
raise AnsibleError("installed passlib version %s not supported" % passlib.__version__)
except ValueError as e:
raise AnsibleError("Could not hash the secret.", orig_exc=e)
# passlib.hash should always return something or raise an exception.
# Still ensure that there is always a result.
# Otherwise an empty password might be assumed by some modules, like the user module.
if not result:
raise AnsibleError("failed to hash with algorithm '%s'" % self.algorithm)
# Hashes from passlib.hash should be represented as ascii strings of hex
# digits so this should not traceback. If it's not representable as such
# we need to traceback and then block such algorithms because it may
# impact calling code.
return to_text(result, errors='strict')
def passlib_or_crypt(secret, algorithm, salt=None, salt_size=None, rounds=None, ident=None):
display.deprecated("passlib_or_crypt API is deprecated in favor of do_encrypt", version='2.20')
return do_encrypt(secret, algorithm, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
def do_encrypt(result, encrypt, salt_size=None, salt=None, ident=None, rounds=None):
if PASSLIB_AVAILABLE:
return PasslibHash(encrypt).hash(result, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
raise AnsibleError("Unable to encrypt nor hash, passlib must be installed", orig_exc=PASSLIB_E)
size: 7,221 | language: Python | extension: .py | total_lines: 143 | avg_line_length: 42.706294 | max_line_length: 128 | alphanum_fraction: 0.6767 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
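
A hedged sketch of `do_encrypt` (requires passlib); fixing the salt makes the hash deterministic, and unspecified rounds fall back to the implicit values in `BaseHash.algorithms`:

```python
hashed = do_encrypt('s3cret', 'sha512_crypt', salt='examplesalt')
print(hashed)  # '$6$rounds=656000$examplesalt$...' -- implicit_rounds=656000 per the table above
```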
id: 14,107 | file_name: plugin_docs.py | file_path: ansible_ansible/lib/ansible/utils/plugin_docs.py | content:
# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from collections.abc import MutableMapping, MutableSet, MutableSequence
from pathlib import Path
from ansible import constants as C
from ansible.release import __version__ as ansible_version
from ansible.errors import AnsibleError, AnsibleParserError, AnsiblePluginNotFound
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_native
from ansible.parsing.plugin_docs import read_docstring
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.utils.display import Display
display = Display()
def merge_fragment(target, source):
for key, value in source.items():
if key in target:
# assumes both structures have the same type
if isinstance(target[key], MutableMapping):
value.update(target[key])
elif isinstance(target[key], MutableSet):
value.add(target[key])
elif isinstance(target[key], MutableSequence):
value = sorted(frozenset(value + target[key]))
else:
raise Exception("Attempt to extend a documentation fragment, invalid type for %s" % key)
target[key] = value
def _process_versions_and_dates(fragment, is_module, return_docs, callback):
def process_deprecation(deprecation, top_level=False):
collection_name = 'removed_from_collection' if top_level else 'collection_name'
if not isinstance(deprecation, MutableMapping):
return
if (is_module or top_level) and 'removed_in' in deprecation: # used in module deprecations
callback(deprecation, 'removed_in', collection_name)
if 'removed_at_date' in deprecation:
callback(deprecation, 'removed_at_date', collection_name)
if not (is_module or top_level) and 'version' in deprecation: # used in plugin option deprecations
callback(deprecation, 'version', collection_name)
def process_option_specifiers(specifiers):
for specifier in specifiers:
if not isinstance(specifier, MutableMapping):
continue
if 'version_added' in specifier:
callback(specifier, 'version_added', 'version_added_collection')
if isinstance(specifier.get('deprecated'), MutableMapping):
process_deprecation(specifier['deprecated'])
def process_options(options):
for option in options.values():
if not isinstance(option, MutableMapping):
continue
if 'version_added' in option:
callback(option, 'version_added', 'version_added_collection')
if not is_module:
if isinstance(option.get('env'), list):
process_option_specifiers(option['env'])
if isinstance(option.get('ini'), list):
process_option_specifiers(option['ini'])
if isinstance(option.get('vars'), list):
process_option_specifiers(option['vars'])
if isinstance(option.get('deprecated'), MutableMapping):
process_deprecation(option['deprecated'])
if isinstance(option.get('suboptions'), MutableMapping):
process_options(option['suboptions'])
def process_return_values(return_values):
for return_value in return_values.values():
if not isinstance(return_value, MutableMapping):
continue
if 'version_added' in return_value:
callback(return_value, 'version_added', 'version_added_collection')
if isinstance(return_value.get('contains'), MutableMapping):
process_return_values(return_value['contains'])
def process_attributes(attributes):
for attribute in attributes.values():
if not isinstance(attribute, MutableMapping):
continue
if 'version_added' in attribute:
callback(attribute, 'version_added', 'version_added_collection')
if not fragment:
return
if return_docs:
process_return_values(fragment)
return
if 'version_added' in fragment:
callback(fragment, 'version_added', 'version_added_collection')
if isinstance(fragment.get('deprecated'), MutableMapping):
process_deprecation(fragment['deprecated'], top_level=True)
if isinstance(fragment.get('options'), MutableMapping):
process_options(fragment['options'])
if isinstance(fragment.get('attributes'), MutableMapping):
process_attributes(fragment['attributes'])
def add_collection_to_versions_and_dates(fragment, collection_name, is_module, return_docs=False):
def add(options, option, collection_name_field):
if collection_name_field not in options:
options[collection_name_field] = collection_name
_process_versions_and_dates(fragment, is_module, return_docs, add)
def remove_current_collection_from_versions_and_dates(fragment, collection_name, is_module, return_docs=False):
def remove(options, option, collection_name_field):
if options.get(collection_name_field) == collection_name:
del options[collection_name_field]
_process_versions_and_dates(fragment, is_module, return_docs, remove)
def add_fragments(doc, filename, fragment_loader, is_module=False):
fragments = doc.pop('extends_documentation_fragment', [])
if isinstance(fragments, string_types):
fragments = fragments.split(',')
unknown_fragments = []
# doc_fragments are allowed to specify a fragment var other than DOCUMENTATION
# with a . separator; this is complicated by collections-hosted doc_fragments that
use the same separator. Assume it's collection-hosted first and try to load
it as specified; on failure, assume the right-most component is a var name,
split it off, and retry the load.
for fragment_slug in fragments:
fragment_name = fragment_slug.strip()
fragment_var = 'DOCUMENTATION'
fragment_class = fragment_loader.get(fragment_name)
if fragment_class is None and '.' in fragment_slug:
splitname = fragment_slug.rsplit('.', 1)
fragment_name = splitname[0]
fragment_var = splitname[1].upper()
fragment_class = fragment_loader.get(fragment_name)
if fragment_class is None:
unknown_fragments.append(fragment_slug)
continue
fragment_yaml = getattr(fragment_class, fragment_var, None)
if fragment_yaml is None:
if fragment_var != 'DOCUMENTATION':
# if it's asking for something specific that's missing, that's an error
unknown_fragments.append(fragment_slug)
continue
else:
fragment_yaml = '{}' # TODO: this is still an error later since we require 'options' below...
fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data()
real_fragment_name = getattr(fragment_class, 'ansible_name')
real_collection_name = '.'.join(real_fragment_name.split('.')[0:2]) if '.' in real_fragment_name else ''
add_collection_to_versions_and_dates(fragment, real_collection_name, is_module=is_module)
if 'notes' in fragment:
notes = fragment.pop('notes')
if notes:
if 'notes' not in doc:
doc['notes'] = []
doc['notes'].extend(notes)
if 'seealso' in fragment:
seealso = fragment.pop('seealso')
if seealso:
if 'seealso' not in doc:
doc['seealso'] = []
doc['seealso'].extend(seealso)
if 'options' not in fragment and 'attributes' not in fragment:
raise Exception("missing options or attributes in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename))
# ensure options themselves are directly merged
for doc_key in ['options', 'attributes']:
if doc_key in fragment:
if doc_key in doc:
try:
merge_fragment(doc[doc_key], fragment.pop(doc_key))
except Exception as e:
raise AnsibleError("%s %s (%s) of unknown type: %s" % (to_native(e), doc_key, fragment_name, filename))
else:
doc[doc_key] = fragment.pop(doc_key)
# merge rest of the sections
try:
merge_fragment(doc, fragment)
except Exception as e:
raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))
if unknown_fragments:
raise AnsibleError('unknown doc_fragment(s) in file {0}: {1}'.format(filename, to_native(', '.join(unknown_fragments))))
def get_docstring(filename, fragment_loader, verbose=False, ignore_errors=False, collection_name=None, is_module=None, plugin_type=None):
"""
DOCUMENTATION can be extended using documentation fragments loaded by the PluginLoader from the doc_fragments plugins.
"""
if is_module is None:
if plugin_type is None:
is_module = False
else:
is_module = (plugin_type == 'module')
else:
# TODO deprecate is_module argument, now that we have 'type'
pass
data = read_docstring(filename, verbose=verbose, ignore_errors=ignore_errors)
if data.get('doc', False):
# add collection name to versions and dates
if collection_name is not None:
add_collection_to_versions_and_dates(data['doc'], collection_name, is_module=is_module)
# add fragments to documentation
add_fragments(data['doc'], filename, fragment_loader=fragment_loader, is_module=is_module)
if data.get('returndocs', False):
# add collection name to versions and dates
if collection_name is not None:
add_collection_to_versions_and_dates(data['returndocs'], collection_name, is_module=is_module, return_docs=True)
return data['doc'], data['plainexamples'], data['returndocs'], data['metadata']
def get_versioned_doclink(path):
"""
returns a versioned documentation link for the current Ansible major.minor version; used to generate
in-product warning/error links to the configured DOCSITE_ROOT_URL
(eg, https://docs.ansible.com/ansible/2.8/somepath/doc.html)
:param path: relative path to a document under docs/docsite/rst;
:return: absolute URL to the specified doc for the current version of Ansible
"""
path = to_native(path)
try:
base_url = C.config.get_config_value('DOCSITE_ROOT_URL')
if not base_url.endswith('/'):
base_url += '/'
if path.startswith('/'):
path = path[1:]
split_ver = ansible_version.split('.')
if len(split_ver) < 3:
raise RuntimeError('invalid version ({0})'.format(ansible_version))
doc_version = '{0}.{1}'.format(split_ver[0], split_ver[1])
# check to see if it's an X.Y.0 non-rc prerelease or dev release; if so, assume devel (since the X.Y doctree
# isn't published until beta-ish)
if split_ver[2].startswith('0'):
# exclude rc; we should have the X.Y doctree live by rc1
if any((pre in split_ver[2]) for pre in ['a', 'b']) or len(split_ver) > 3 and 'dev' in split_ver[3]:
doc_version = 'devel'
return '{0}{1}/{2}'.format(base_url, doc_version, path)
except Exception as ex:
return '(unable to create versioned doc link for path {0}: {1})'.format(path, to_native(ex))
def _find_adjacent(path, plugin, extensions):
adjacent = Path(path)
plugin_base_name = plugin.split('.')[-1]
if adjacent.stem != plugin_base_name:
# this should only affect filters/tests
adjacent = adjacent.with_name(plugin_base_name)
paths = []
for ext in extensions:
candidate = adjacent.with_suffix(ext)
if candidate == adjacent:
# we're looking for an adjacent file, skip this since it's identical
continue
if candidate.exists():
paths.append(to_native(candidate))
return paths
def find_plugin_docfile(plugin, plugin_type, loader):
""" if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding 'sidecar' file for docs """
context = loader.find_plugin_with_context(plugin, ignore_deprecated=False, check_aliases=True)
if (not context or not context.resolved) and plugin_type in ('filter', 'test'):
# should only happen for filters/test
plugin_obj, context = loader.get_with_context(plugin)
if not context or not context.resolved:
raise AnsiblePluginNotFound('%s was not found' % (plugin), plugin_load_context=context)
docfile = Path(context.plugin_resolved_path)
if docfile.suffix not in C.DOC_EXTENSIONS:
# only look for adjacent if plugin file does not support documents
filenames = _find_adjacent(docfile, plugin, C.DOC_EXTENSIONS)
filename = filenames[0] if filenames else None
else:
filename = to_native(docfile)
if filename is None:
raise AnsibleError('%s cannot contain DOCUMENTATION nor does it have a companion documentation file' % (plugin))
return filename, context
def get_plugin_docs(plugin, plugin_type, loader, fragment_loader, verbose):
docs = []
# find the plugin doc file; if it doesn't exist this raises an exception,
# which we let through to short circuit on 'not found'
filename, context = find_plugin_docfile(plugin, plugin_type, loader)
collection_name = context.plugin_resolved_collection
try:
docs = get_docstring(filename, fragment_loader, verbose=verbose, collection_name=collection_name, plugin_type=plugin_type)
except Exception as e:
raise AnsibleParserError('%s did not contain a DOCUMENTATION attribute (%s)' % (plugin, filename), orig_exc=e)
# no good? try adjacent
if not docs[0]:
for newfile in _find_adjacent(filename, plugin, C.DOC_EXTENSIONS):
try:
docs = get_docstring(newfile, fragment_loader, verbose=verbose, collection_name=collection_name, plugin_type=plugin_type)
filename = newfile
if docs[0] is not None:
break
except Exception as e:
raise AnsibleParserError('Adjacent file %s did not contain a DOCUMENTATION attribute (%s)' % (plugin, filename), orig_exc=e)
# add extra data to docs[0] (aka 'DOCUMENTATION')
if docs[0] is None:
raise AnsibleParserError('No documentation available for %s (%s)' % (plugin, filename))
else:
docs[0]['filename'] = filename
docs[0]['collection'] = collection_name
docs[0]['plugin_name'] = context.resolved_fqcn
return docs
size: 15,151 | language: Python | extension: .py | total_lines: 278 | avg_line_length: 44.68705 | max_line_length: 140 | alphanum_fraction: 0.656125 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
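
A small sketch of `merge_fragment` semantics, using invented doc data: for mapping values the target's entries win, and sequence values are union-merged and sorted:

```python
doc = {'options': {'name': {'type': 'str'}}, 'notes': ['b note']}
fragment = {'options': {'path': {'type': 'path'}}, 'notes': ['a note']}
merge_fragment(doc, fragment)
print(doc['options'])  # {'path': {'type': 'path'}, 'name': {'type': 'str'}}
print(doc['notes'])    # ['a note', 'b note']
```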
id: 14,108 | file_name: listify.py | file_path: ansible_ansible/lib/ansible/utils/listify.py | content:
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from collections.abc import Iterable
from ansible.module_utils.six import string_types
from ansible.utils.display import Display
display = Display()
__all__ = ['listify_lookup_plugin_terms']
def listify_lookup_plugin_terms(terms, templar, fail_on_undefined=True, convert_bare=False):
if isinstance(terms, string_types):
terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
else:
terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
if isinstance(terms, string_types) or not isinstance(terms, Iterable):
terms = [terms]
return terms
size: 1,387 | language: Python | extension: .py | total_lines: 30 | avg_line_length: 43.566667 | max_line_length: 111 | alphanum_fraction: 0.764662 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
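
A sketch with a stub templar (real callers pass Ansible's `Templar`); the point is that scalar terms come back wrapped in a list:

```python
class StubTemplar:
    def template(self, value, **kwargs):
        return value  # pass-through; no real templating here

print(listify_lookup_plugin_terms('single_term', StubTemplar()))  # ['single_term']
print(listify_lookup_plugin_terms(['a', 'b'], StubTemplar()))     # ['a', 'b']
```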
id: 14,109 | file_name: vars.py | file_path: ansible_ansible/lib/ansible/utils/vars.py | content:
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import keyword
import secrets
import uuid
from collections.abc import MutableMapping, MutableSequence
from json import dumps
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.parsing.splitter import parse_kv
_MAXSIZE = 2 ** 32
cur_id = 0
node_mac = ("%012x" % uuid.getnode())[:12]
random_int = ("%08x" % secrets.randbelow(_MAXSIZE))[:8]
def get_unique_id():
global cur_id
cur_id += 1
return "-".join([
node_mac[0:8],
node_mac[8:12],
random_int[0:4],
random_int[4:8],
("%012x" % cur_id)[:12],
])
def _validate_mutable_mappings(a, b):
"""
Internal convenience function to ensure arguments are MutableMappings
This checks that all arguments are MutableMappings or raises an error
:raises AnsibleError: if one of the arguments is not a MutableMapping
"""
# If this becomes generally needed, change the signature to operate on
# a variable number of arguments instead.
if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
myvars = []
for x in [a, b]:
try:
myvars.append(dumps(x))
except Exception:
myvars.append(to_native(x))
raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format(
a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1])
)
def combine_vars(a, b, merge=None):
"""
Return a copy of the two variable dictionaries combined according to the configured hash behavior
"""
if merge or merge is None and C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
# HASH_BEHAVIOUR == 'replace'
_validate_mutable_mappings(a, b)
result = a | b
return result
def merge_hash(x, y, recursive=True, list_merge='replace'):
"""
Return a new dictionary that is the result of merging y into x,
so that keys from y take precedence over keys from x.
(x and y aren't modified)
"""
if list_merge not in ('replace', 'keep', 'append', 'prepend', 'append_rp', 'prepend_rp'):
raise AnsibleError("merge_hash: 'list_merge' argument can only be equal to 'replace', 'keep', 'append', 'prepend', 'append_rp' or 'prepend_rp'")
# verify x & y are dicts
_validate_mutable_mappings(x, y)
# to speed things up: if x is empty or equal to y, return y
# (this `if` can be removed without impact on the function
# except performance)
if x == {} or x == y:
return y.copy()
if y == {}:
return x
# in the following we will copy elements from y to x, but
# we don't want to modify x, so we create a copy of it
x = x.copy()
# to speed things up: use dict.update if possible
# (this `if` can be removed without impact on the function
# except performance)
if not recursive and list_merge == 'replace':
x.update(y)
return x
# insert each element of y in x, overriding the one in x
# (as y has higher priority)
# we copy elements from y to x instead of x to y because
# there is a high probability x will be the "default" dict the user
# wants to "patch" with y
# therefore x will have many more elements than y
for key, y_value in y.items():
# if `key` isn't in x
# update x and move on to the next element of y
if key not in x:
x[key] = y_value
continue
# from this point we know `key` is in x
x_value = x[key]
# if both x's element and y's element are dicts
# recursively "combine" them or override x's with y's element
# depending on the `recursive` argument
# and move on to the next element of y
if isinstance(x_value, MutableMapping) and isinstance(y_value, MutableMapping):
if recursive:
x[key] = merge_hash(x_value, y_value, recursive, list_merge)
else:
x[key] = y_value
continue
# if both x's element and y's element are lists
# "merge" them depending on the `list_merge` argument
# and move on to the next element of y
if isinstance(x_value, MutableSequence) and isinstance(y_value, MutableSequence):
if list_merge == 'replace':
# replace x value by y's one as it has higher priority
x[key] = y_value
elif list_merge == 'append':
x[key] = x_value + y_value
elif list_merge == 'prepend':
x[key] = y_value + x_value
elif list_merge == 'append_rp':
# append all elements from y_value (high prio) to x_value (low prio)
# and remove x_value elements that are also in y_value
# we don't remove elements from x_value or y_value that were already duplicated
# (we assume that there is a reason if such duplicate elements exist)
# _rp stands for "remove present"
x[key] = [z for z in x_value if z not in y_value] + y_value
elif list_merge == 'prepend_rp':
# same as 'append_rp' but y_value elements are prepended
x[key] = y_value + [z for z in x_value if z not in y_value]
# else 'keep'
# keep x's value even if y is of higher priority
# it's done by not changing x[key]
continue
# else just override x's element with y's one
x[key] = y_value
return x
def load_extra_vars(loader):
if not getattr(load_extra_vars, 'extra_vars', None):
extra_vars = {}
for extra_vars_opt in context.CLIARGS.get('extra_vars', tuple()):
data = None
extra_vars_opt = to_text(extra_vars_opt, errors='surrogate_or_strict')
if extra_vars_opt is None or not extra_vars_opt:
continue
if extra_vars_opt.startswith(u"@"):
# Argument is a YAML file (JSON is a subset of YAML)
data = loader.load_from_file(extra_vars_opt[1:])
elif extra_vars_opt[0] in [u'/', u'.']:
raise AnsibleOptionsError("Please prepend extra_vars filename '%s' with '@'" % extra_vars_opt)
elif extra_vars_opt[0] in [u'[', u'{']:
# Arguments as YAML
data = loader.load(extra_vars_opt)
else:
# Arguments as Key-value
data = parse_kv(extra_vars_opt)
if isinstance(data, MutableMapping):
extra_vars = combine_vars(extra_vars, data)
else:
raise AnsibleOptionsError("Invalid extra vars data supplied. '%s' could not be made into a dictionary" % extra_vars_opt)
setattr(load_extra_vars, 'extra_vars', extra_vars)
return load_extra_vars.extra_vars
def load_options_vars(version):
if not getattr(load_options_vars, 'options_vars', None):
if version is None:
version = 'Unknown'
options_vars = {'ansible_version': version}
attrs = {'check': 'check_mode',
'diff': 'diff_mode',
'forks': 'forks',
'inventory': 'inventory_sources',
'skip_tags': 'skip_tags',
'subset': 'limit',
'tags': 'run_tags',
'verbosity': 'verbosity'}
for attr, alias in attrs.items():
opt = context.CLIARGS.get(attr)
if opt is not None:
options_vars['ansible_%s' % alias] = opt
setattr(load_options_vars, 'options_vars', options_vars)
return load_options_vars.options_vars
def isidentifier(ident):
"""Determine if string is valid identifier.
The purpose of this function is to be used to validate any variables created in
a play to be valid Python identifiers and to not conflict with Python keywords
to prevent unexpected behavior. Since Python 2 and Python 3 differ in what
a valid identifier is, this function unifies the validation so playbooks are
portable between the two. The following changes were made:
* disallow non-ascii characters (Python 3 allows for them as opposed to Python 2)
:arg ident: A text string containing the identifier to check. Note: it is the
caller's responsibility to convert ident to text if it is not already.
Originally posted at https://stackoverflow.com/a/29586366
"""
if not isinstance(ident, string_types):
return False
if not ident.isascii():
return False
if not ident.isidentifier():
return False
if keyword.iskeyword(ident):
return False
return True
size: 9,640 | language: Python | extension: .py | total_lines: 213 | avg_line_length: 36.859155 | max_line_length: 152 | alphanum_fraction: 0.629187 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
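
Expected `merge_hash` behavior for two of the `list_merge` modes described in the comments above:

```python
x = {'a': {'k': 1}, 'l': [1, 2]}
y = {'a': {'j': 2}, 'l': [2, 3]}
print(merge_hash(x, y))                          # {'a': {'k': 1, 'j': 2}, 'l': [2, 3]}
print(merge_hash(x, y, list_merge='append_rp'))  # {'a': {'k': 1, 'j': 2}, 'l': [1, 2, 3]}
```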
id: 14,110 | file_name: galaxy.py | file_path: ansible_ansible/lib/ansible/utils/galaxy.py | content:
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import tempfile
from subprocess import Popen, PIPE
import tarfile
import ansible.constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.utils.display import Display
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_text, to_native
display = Display()
def scm_archive_collection(src, name=None, version='HEAD'):
return scm_archive_resource(src, scm='git', name=name, version=version, keep_scm_meta=False)
def scm_archive_resource(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
def run_scm_cmd(cmd, tempdir):
try:
stdout = ''
stderr = ''
popen = Popen(cmd, cwd=tempdir, stdout=PIPE, stderr=PIPE)
stdout, stderr = popen.communicate()
except Exception as e:
ran = " ".join(cmd)
display.debug("ran %s:" % ran)
raise AnsibleError("when executing %s: %s" % (ran, to_native(e)))
if popen.returncode != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s) - %s" % (' '.join(cmd), tempdir, popen.returncode, to_native(stderr)))
if scm not in ['hg', 'git']:
raise AnsibleError("- scm %s is not currently supported" % scm)
try:
scm_path = get_bin_path(scm)
except (ValueError, OSError, IOError):
raise AnsibleError("could not find/use %s, it is required to continue with installing %s" % (scm, src))
tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
clone_cmd = [scm_path, 'clone']
# Add specific options for ignoring certificates if requested
ignore_certs = context.CLIARGS['ignore_certs'] or C.GALAXY_IGNORE_CERTS
if ignore_certs:
if scm == 'git':
clone_cmd.extend(['-c', 'http.sslVerify=false'])
elif scm == 'hg':
clone_cmd.append('--insecure')
clone_cmd.extend([src, name])
run_scm_cmd(clone_cmd, tempdir)
if scm == 'git' and version:
checkout_cmd = [scm_path, 'checkout', to_text(version)]
run_scm_cmd(checkout_cmd, os.path.join(tempdir, name))
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar', dir=C.DEFAULT_LOCAL_TMP)
archive_cmd = None
if keep_scm_meta:
display.vvv('tarring %s from %s to %s' % (name, tempdir, temp_file.name))
with tarfile.open(temp_file.name, "w") as tar:
tar.add(os.path.join(tempdir, name), arcname=name)
elif scm == 'hg':
archive_cmd = [scm_path, 'archive', '--prefix', "%s/" % name]
if version:
archive_cmd.extend(['-r', version])
archive_cmd.append(temp_file.name)
elif scm == 'git':
archive_cmd = [scm_path, 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
if version:
archive_cmd.append(version)
else:
archive_cmd.append('HEAD')
if archive_cmd is not None:
display.vvv('archiving %s' % archive_cmd)
run_scm_cmd(archive_cmd, os.path.join(tempdir, name))
return temp_file.name
size: 3,855 | language: Python | extension: .py | total_lines: 84 | avg_line_length: 39.785714 | max_line_length: 146 | alphanum_fraction: 0.666667 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
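
A hedged sketch of `scm_archive_resource`; it needs `git` on `PATH` and an initialized Ansible context (it reads `context.CLIARGS` and `C.DEFAULT_LOCAL_TMP`), and the URL/tag below are placeholders:

```python
tarball = scm_archive_resource(
    'https://github.com/example/ansible-role-demo.git',
    scm='git', name='demo', version='v1.0.0',
)
print(tarball)  # path to a .tar file under C.DEFAULT_LOCAL_TMP
```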
id: 14,111 | file_name: hashing.py | file_path: ansible_ansible/lib/ansible/utils/hashing.py | content:
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from hashlib import sha1
try:
from hashlib import md5 as _md5
except ImportError:
# Assume we're running in FIPS mode here
_md5 = None
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes
def secure_hash_s(data, hash_func=sha1):
""" Return a secure hash hex digest of data. """
digest = hash_func()
data = to_bytes(data, errors='surrogate_or_strict')
digest.update(data)
return digest.hexdigest()
def secure_hash(filename, hash_func=sha1):
""" Return a secure hash hex digest of local file, None if file is not present or a directory. """
if not os.path.exists(to_bytes(filename, errors='surrogate_or_strict')) or os.path.isdir(to_bytes(filename, errors='strict')):
return None
digest = hash_func()
blocksize = 64 * 1024
try:
infile = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')
block = infile.read(blocksize)
while block:
digest.update(block)
block = infile.read(blocksize)
infile.close()
except IOError as e:
raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
checksum = secure_hash
checksum_s = secure_hash_s
#
# Backwards compat functions. Some modules include md5s in their return values
# Continue to support that for now. As of ansible-1.8, all of those modules
# should also return "checksum" (sha1 for now)
# Do not use md5 unless it is needed for:
# 1) Optional backwards compatibility
# 2) Compliance with a third party protocol
#
# MD5 will not work on systems which are FIPS-140-2 compliant.
#
def md5s(data):
if not _md5:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return secure_hash_s(data, _md5)
def md5(filename):
if not _md5:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return secure_hash(filename, _md5)
size: 2,837 | language: Python | extension: .py | total_lines: 69 | avg_line_length: 37.478261 | max_line_length: 130 | alphanum_fraction: 0.729818 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
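
A quick illustration of the string hasher and the backwards-compat aliases:

```python
print(secure_hash_s('hello'))       # 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d' (sha1)
print(checksum_s is secure_hash_s)  # True -- checksum/checksum_s alias the sha1 variants
```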
id: 14,112 | file_name: display.py | file_path: ansible_ansible/lib/ansible/utils/display.py | content:
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
try:
import curses
except ImportError:
HAS_CURSES = False
else:
# this will be set to False if curses.setupterm() fails
HAS_CURSES = True
import collections.abc as c
import codecs
import ctypes.util
import fcntl
import getpass
import io
import logging
import os
import secrets
import subprocess
import sys
import termios
import textwrap
import threading
import time
import tty
import typing as t
from functools import wraps
from struct import unpack, pack
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError, AnsiblePromptInterrupt, AnsiblePromptNoninteractive
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.six import text_type
from ansible.utils.color import stringc
from ansible.utils.multiprocessing import context as multiprocessing_context
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
if t.TYPE_CHECKING:
# avoid circular import at runtime
from ansible.executor.task_queue_manager import FinalQueue
P = t.ParamSpec('P')
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
MOVE_TO_BOL = b'\r'
CLEAR_TO_EOL = b'\x1b[K'
def get_text_width(text: str) -> int:
"""Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
    We try ``wcswidth`` first, and fall back to iterating each
    character and using ``wcwidth`` individually, substituting a width of 0
    for non-printable wide characters.
"""
if not isinstance(text, text_type):
raise TypeError('get_text_width requires text, not %s' % type(text))
try:
width = _LIBC.wcswidth(text, _MAX_INT)
except ctypes.ArgumentError:
width = -1
if width != -1:
return width
width = 0
counter = 0
for c in text:
counter += 1
if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
# A few characters result in a subtraction of length:
# BS, DEL, CCH, ESC
# ESC is slightly different in that it's part of an escape sequence, and
# while ESC is non printable, it's part of an escape sequence, which results
# in a single non printable length
width -= 1
counter -= 1
continue
try:
w = _LIBC.wcwidth(c)
except ctypes.ArgumentError:
w = -1
if w == -1:
# -1 signifies a non-printable character
# use 0 here as a best effort
w = 0
width += w
if width == 0 and counter:
raise EnvironmentError(
'get_text_width could not calculate text width of %r' % text
)
# It doesn't make sense to have a negative printable width
return width if width >= 0 else 0
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
self.blacklist = [logging.Filter(name) for name in blacklist]
def filter(self, record):
return not any(f.filter(record) for f in self.blacklist)
class FilterUserInjector(logging.Filter):
"""
This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
"""
try:
username = getpass.getuser()
except KeyError:
# people like to make containers w/o actual valid passwd/shadow and use host uids
username = 'uid=%s' % os.getuid()
def filter(self, record):
record.user = FilterUserInjector.username
return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
if not os.path.isdir(path):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s %(levelname)s| %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print(f"[WARNING]: DEFAULT_LOG_PATH can not be a directory '{path}', aborting", file=sys.stderr)
else:
print(f"[WARNING]: log file at '{path}' is not writeable and we cannot create it, aborting\n", file=sys.stderr)
# map color to log levels, in order of priority (low to high)
color_to_log_level = {C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_VERBOSE: logging.INFO,
C.COLOR_OK: logging.INFO,
C.COLOR_INCLUDED: logging.INFO,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_WARN: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_ERROR: logging.ERROR}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
def _synchronize_textiowrapper(tio: t.TextIO, lock: threading.RLock):
# Ensure that a background thread can't hold the internal buffer lock on a file object
# during a fork, which causes forked children to hang. We're using display's existing lock for
# convenience (and entering the lock before a fork).
def _wrap_with_lock(f, lock):
@wraps(f)
def locking_wrapper(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return locking_wrapper
buffer = tio.buffer
# monkeypatching the underlying file-like object isn't great, but likely safer than subclassing
buffer.write = _wrap_with_lock(buffer.write, lock) # type: ignore[method-assign]
buffer.flush = _wrap_with_lock(buffer.flush, lock) # type: ignore[method-assign]
def setraw(fd: int, when: int = termios.TCSAFLUSH) -> None:
"""Put terminal into a raw mode.
Copied from ``tty`` from CPython 3.11.0, and modified to not remove OPOST from OFLAG
OPOST is kept to prevent an issue with multi line prompts from being corrupted now that display
is proxied via the queue from forks. The problem is a race condition, in that we proxy the display
over the fork, but before it can be displayed, this plugin will have continued executing, potentially
    setting stdout and stdin to raw, which removes the output post-processing that commonly converts NL to CRLF.
"""
mode = termios.tcgetattr(fd)
mode[tty.IFLAG] = mode[tty.IFLAG] & ~(termios.BRKINT | termios.ICRNL | termios.INPCK | termios.ISTRIP | termios.IXON)
mode[tty.OFLAG] = mode[tty.OFLAG] & ~(termios.OPOST)
mode[tty.CFLAG] = mode[tty.CFLAG] & ~(termios.CSIZE | termios.PARENB)
mode[tty.CFLAG] = mode[tty.CFLAG] | termios.CS8
mode[tty.LFLAG] = mode[tty.LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
mode[tty.CC][termios.VMIN] = 1
mode[tty.CC][termios.VTIME] = 0
termios.tcsetattr(fd, when, mode)
def clear_line(stdout: t.BinaryIO) -> None:
stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)
def setup_prompt(stdin_fd: int, stdout_fd: int, seconds: int, echo: bool) -> None:
setraw(stdin_fd)
# Only set stdout to raw mode if it is a TTY. This is needed when redirecting
# stdout to a file since a file cannot be set to raw mode.
if os.isatty(stdout_fd):
setraw(stdout_fd)
if echo:
new_settings = termios.tcgetattr(stdin_fd)
new_settings[3] = new_settings[3] | termios.ECHO
termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
def setupterm() -> None:
# Nest the try except since curses.error is not available if curses did not import
try:
curses.setupterm()
except (curses.error, TypeError, io.UnsupportedOperation):
global HAS_CURSES
HAS_CURSES = False
else:
global MOVE_TO_BOL
global CLEAR_TO_EOL
# curses.tigetstr() returns None in some circumstances
MOVE_TO_BOL = curses.tigetstr('cr') or MOVE_TO_BOL
CLEAR_TO_EOL = curses.tigetstr('el') or CLEAR_TO_EOL
class Display(metaclass=Singleton):
def __init__(self, verbosity: int = 0) -> None:
self._final_q: FinalQueue | None = None
# NB: this lock is used to both prevent intermingled output between threads and to block writes during forks.
# Do not change the type of this lock or upgrade to a shared lock (eg multiprocessing.RLock).
self._lock = threading.RLock()
self.columns = None
self.verbosity = verbosity
if C.LOG_VERBOSITY is None:
self.log_verbosity = verbosity
else:
self.log_verbosity = max(verbosity, C.LOG_VERBOSITY)
# list of all deprecation messages to prevent duplicate display
self._deprecations: dict[str, int] = {}
self._warns: dict[str, int] = {}
self._errors: dict[str, int] = {}
self.b_cowsay: bytes | None = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if cmd.returncode:
raise Exception
self.cows_available: set[str] = {to_text(c) for c in out.split()}
if C.ANSIBLE_COW_ACCEPTLIST and any(C.ANSIBLE_COW_ACCEPTLIST):
self.cows_available = set(C.ANSIBLE_COW_ACCEPTLIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = None
self._set_column_width()
try:
# NB: we're relying on the display singleton behavior to ensure this only runs once
_synchronize_textiowrapper(sys.stdout, self._lock)
_synchronize_textiowrapper(sys.stderr, self._lock)
except Exception as ex:
self.warning(f"failed to patch stdout/stderr for fork-safety: {ex}")
codecs.register_error('_replacing_warning_handler', self._replacing_warning_handler)
try:
sys.stdout.reconfigure(errors='_replacing_warning_handler') # type: ignore[union-attr]
sys.stderr.reconfigure(errors='_replacing_warning_handler') # type: ignore[union-attr]
except Exception as ex:
self.warning(f"failed to reconfigure stdout/stderr with custom encoding error handler: {ex}")
self.setup_curses = False
def _replacing_warning_handler(self, exception: UnicodeError) -> tuple[str | bytes, int]:
# TODO: This should probably be deferred until after the current display is completed
# this will require some amount of new functionality
self.deprecated(
'Non UTF-8 encoded data replaced with "?" while displaying text to stdout/stderr, this is temporary and will become an error',
version='2.18',
)
return '?', exception.end
def set_queue(self, queue: FinalQueue) -> None:
"""Set the _final_q on Display, so that we know to proxy display over the queue
instead of directly writing to stdout/stderr from forks
This is only needed in ansible.executor.process.worker:WorkerProcess._run
"""
if multiprocessing_context.parent_process() is None:
raise RuntimeError('queue cannot be set in parent process')
self._final_q = queue
def set_cowsay_info(self) -> None:
if C.ANSIBLE_NOCOWS:
return
if C.ANSIBLE_COW_PATH:
self.b_cowsay = C.ANSIBLE_COW_PATH
else:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
@staticmethod
def _proxy(
func: c.Callable[t.Concatenate[Display, P], None]
) -> c.Callable[..., None]:
@wraps(func)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> None:
if self._final_q:
# If _final_q is set, that means we are in a WorkerProcess
# and instead of displaying messages directly from the fork
# we will proxy them through the queue
return self._final_q.send_display(func.__name__, *args, **kwargs)
return func(self, *args, **kwargs)
return wrapper
@staticmethod
def _meets_debug(
func: c.Callable[..., None]
) -> c.Callable[..., None]:
"""This method ensures that debug is enabled before delegating to the proxy
"""
@wraps(func)
def wrapper(self, msg: str, host: str | None = None) -> None:
if not C.DEFAULT_DEBUG:
return
return func(self, msg, host=host)
return wrapper
@staticmethod
def _meets_verbosity(
func: c.Callable[..., None]
) -> c.Callable[..., None]:
"""This method ensures the verbosity has been met before delegating to the proxy
Currently this method is unused, and the logic is handled directly in ``verbose``
"""
@wraps(func)
        def wrapper(self, msg: str, host: str | None = None, caplevel: int | None = None) -> None:
if self.verbosity > caplevel:
return func(self, msg, host=host, caplevel=caplevel)
return
return wrapper
@_proxy
def display(
self,
msg: str,
color: str | None = None,
stderr: bool = False,
screen_only: bool = False,
log_only: bool = False,
newline: bool = True,
caplevel: int | None = None,
) -> None:
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
if not isinstance(msg, str):
raise TypeError(f'Display message must be str, not: {msg.__class__.__name__}')
nocolor = msg
if not log_only:
has_newline = msg.endswith(u'\n')
if has_newline:
msg2 = msg[:-1]
else:
msg2 = msg
if color:
msg2 = stringc(msg2, color)
if has_newline or newline:
msg2 = msg2 + u'\n'
# Note: After Display() class is refactored need to update the log capture
# code in 'cli/scripts/ansible_connection_cli_stub.py' (and other relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
with self._lock:
fileobj.write(msg2)
# With locks, and the fact that we aren't printing from forks
# just write, and let the system flush. Everything should come out peachy
# I've left this code for historical purposes, or in case we need to add this
# back at a later date. For now ``TaskQueueManager.cleanup`` will perform a
# final flush at shutdown.
# try:
# fileobj.flush()
# except IOError as e:
# # Ignore EPIPE in case fileobj has been prematurely closed, eg.
# # when piping to "head -n1"
# if e.errno != errno.EPIPE:
# raise
if logger and not screen_only:
self._log(nocolor, color, caplevel)
def _log(self, msg: str, color: str | None = None, caplevel: int | None = None):
if logger and (caplevel is None or self.log_verbosity > caplevel):
msg2 = msg.lstrip('\n')
if caplevel is None or caplevel > 0:
lvl = logging.INFO
elif caplevel == -1:
lvl = logging.ERROR
elif caplevel == -2:
lvl = logging.WARNING
elif caplevel == -3:
lvl = logging.DEBUG
elif color:
# set logger level based on color (not great)
# but last resort and backwards compatible
try:
lvl = color_to_log_level[color]
except KeyError:
# this should not happen if mapping is updated with new color configs, but JIC
raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
# actually log
logger.log(lvl, msg2)
def v(self, msg: str, host: str | None = None) -> None:
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg: str, host: str | None = None) -> None:
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg: str, host: str | None = None) -> None:
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg: str, host: str | None = None) -> None:
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg: str, host: str | None = None) -> None:
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg: str, host: str | None = None) -> None:
return self.verbose(msg, host=host, caplevel=5)
def verbose(self, msg: str, host: str | None = None, caplevel: int = 2) -> None:
if self.verbosity > caplevel:
self._verbose_display(msg, host=host, caplevel=caplevel)
if self.log_verbosity > self.verbosity and self.log_verbosity > caplevel:
self._verbose_log(msg, host=host, caplevel=caplevel)
@_proxy
def _verbose_display(self, msg: str, host: str | None = None, caplevel: int = 2) -> None:
to_stderr = C.VERBOSE_TO_STDERR
if host is None:
self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
@_proxy
def _verbose_log(self, msg: str, host: str | None = None, caplevel: int = 2) -> None:
# we send to log if log was configured with higher verbosity
if host is not None:
msg = "<%s> %s" % (host, msg)
self._log(msg, C.COLOR_VERBOSE, caplevel)
@_meets_debug
@_proxy
def debug(self, msg: str, host: str | None = None) -> None:
prefix = "%6d %0.5f" % (os.getpid(), time.time())
if host is not None:
prefix += f" [{host}]"
self.display(f"{prefix}: {msg}", color=C.COLOR_DEBUG, caplevel=-3)
def get_deprecation_message(
self,
msg: str,
version: str | None = None,
removed: bool = False,
date: str | None = None,
collection_name: str | None = None,
) -> str:
""" used to print out a deprecation message."""
msg = msg.strip()
if msg and msg[-1] not in ['!', '?', '.']:
msg += '.'
if collection_name == 'ansible.builtin':
collection_name = 'ansible-core'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
removal_fragment = 'This feature was removed'
help_text = 'Please update your playbooks.'
else:
header = '[DEPRECATION WARNING]: {0}'.format(msg)
removal_fragment = 'This feature will be removed'
# FUTURE: make this a standalone warning so it only shows up once?
help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
if collection_name:
from_fragment = 'from {0}'.format(collection_name)
else:
from_fragment = ''
if date:
when = 'in a release after {0}.'.format(date)
elif version:
when = 'in version {0}.'.format(version)
else:
when = 'in a future release.'
message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
return message_text
@_proxy
def deprecated(
self,
msg: str,
version: str | None = None,
removed: bool = False,
date: str | None = None,
collection_name: str | None = None,
) -> None:
if not removed and not C.DEPRECATION_WARNINGS:
return
message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
if removed:
raise AnsibleError(message_text)
wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
message_text = "\n".join(wrapped) + "\n"
if message_text not in self._deprecations:
self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[message_text] = 1
@_proxy
def warning(self, msg: str, formatted: bool = False) -> None:
if not formatted:
new_msg = "[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True, caplevel=-2)
self._warns[new_msg] = 1
@_proxy
def system_warning(self, msg: str) -> None:
if C.SYSTEM_WARNINGS:
self.warning(msg)
@_proxy
def banner(self, msg: str, color: str | None = None, cows: bool = True) -> None:
"""
Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum)
"""
msg = to_text(msg)
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
try:
star_len = self.columns - get_text_width(msg)
except EnvironmentError:
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
@_proxy
def banner_cowsay(self, msg: str, color: str | None = None) -> None:
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = secrets.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
@_proxy
def error(self, msg: str, wrap_text: bool = True) -> None:
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True, caplevel=-1)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg: str, private: bool = False) -> str:
if private:
return getpass.getpass(msg)
else:
return input(msg)
def do_var_prompt(
self,
varname: str,
private: bool = True,
prompt: str | None = None,
encrypt: str | None = None,
confirm: bool = False,
salt_size: int | None = None,
salt: str | None = None,
default: str | None = None,
unsafe: bool = False,
) -> str:
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size=salt_size, salt=salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
if unsafe:
result = wrap_var(result)
return result
def _set_column_width(self) -> None:
if os.isatty(1):
tty_size = unpack('HHHH', fcntl.ioctl(1, termios.TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
def prompt_until(
self,
msg: str,
private: bool = False,
seconds: int | None = None,
interrupt_input: c.Container[bytes] | None = None,
complete_input: c.Container[bytes] | None = None,
) -> bytes:
if self._final_q:
from ansible.executor.process.worker import current_worker
self._final_q.send_prompt(
worker_id=current_worker.worker_id, prompt=msg, private=private, seconds=seconds,
interrupt_input=interrupt_input, complete_input=complete_input
)
return current_worker.worker_queue.get()
if HAS_CURSES and not self.setup_curses:
setupterm()
self.setup_curses = True
if (
self._stdin_fd is None
or not os.isatty(self._stdin_fd)
# Compare the current process group to the process group associated
# with terminal of the given file descriptor to determine if the process
# is running in the background.
or os.getpgrp() != os.tcgetpgrp(self._stdin_fd)
):
raise AnsiblePromptNoninteractive('stdin is not interactive')
# When seconds/interrupt_input/complete_input are all None, this does mostly the same thing as input/getpass,
# but self.prompt may raise a KeyboardInterrupt, which must be caught in the main thread.
# If the main thread handled this, it would also need to send a newline to the tty of any hanging pids.
# if seconds is None and interrupt_input is None and complete_input is None:
# try:
# return self.prompt(msg, private=private)
# except KeyboardInterrupt:
# # can't catch in the results_thread_main daemon thread
# raise AnsiblePromptInterrupt('user interrupt')
self.display(msg)
result = b''
with self._lock:
original_stdin_settings = termios.tcgetattr(self._stdin_fd)
try:
setup_prompt(self._stdin_fd, self._stdout_fd, seconds, not private)
# flush the buffer to make sure no previous key presses
# are read in below
termios.tcflush(self._stdin, termios.TCIFLUSH)
# read input 1 char at a time until the optional timeout or complete/interrupt condition is met
return self._read_non_blocking_stdin(echo=not private, seconds=seconds, interrupt_input=interrupt_input, complete_input=complete_input)
finally:
# restore the old settings for the duped stdin stdin_fd
termios.tcsetattr(self._stdin_fd, termios.TCSADRAIN, original_stdin_settings)
def _read_non_blocking_stdin(
self,
echo: bool = False,
seconds: int | None = None,
interrupt_input: c.Container[bytes] | None = None,
complete_input: c.Container[bytes] | None = None,
) -> bytes:
if self._final_q:
raise NotImplementedError
if seconds is not None:
start = time.time()
if interrupt_input is None:
try:
interrupt = termios.tcgetattr(sys.stdin.buffer.fileno())[6][termios.VINTR]
except Exception:
interrupt = b'\x03' # value for Ctrl+C
try:
backspace_sequences = [termios.tcgetattr(self._stdin_fd)[6][termios.VERASE]]
except Exception:
# unsupported/not present, use default
backspace_sequences = [b'\x7f', b'\x08']
result_string = b''
while seconds is None or (time.time() - start < seconds):
key_pressed = None
try:
os.set_blocking(self._stdin_fd, False)
while key_pressed is None and (seconds is None or (time.time() - start < seconds)):
key_pressed = self._stdin.read(1)
# throttle to prevent excess CPU consumption
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
finally:
os.set_blocking(self._stdin_fd, True)
if key_pressed is None:
key_pressed = b''
if (interrupt_input is None and key_pressed == interrupt) or (interrupt_input is not None and key_pressed.lower() in interrupt_input):
clear_line(self._stdout)
raise AnsiblePromptInterrupt('user interrupt')
if (complete_input is None and key_pressed in (b'\r', b'\n')) or (complete_input is not None and key_pressed.lower() in complete_input):
clear_line(self._stdout)
break
elif key_pressed in backspace_sequences:
clear_line(self._stdout)
result_string = result_string[:-1]
if echo:
self._stdout.write(result_string)
self._stdout.flush()
else:
result_string += key_pressed
return result_string
@property
def _stdin(self) -> t.BinaryIO | None:
if self._final_q:
raise NotImplementedError
try:
return sys.stdin.buffer
except AttributeError:
return None
@property
def _stdin_fd(self) -> int | None:
try:
return self._stdin.fileno()
except (ValueError, AttributeError):
return None
@property
def _stdout(self) -> t.BinaryIO:
if self._final_q:
raise NotImplementedError
return sys.stdout.buffer
@property
def _stdout_fd(self) -> int | None:
try:
return self._stdout.fileno()
except (ValueError, AttributeError):
return None
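Editor's note: a short, hypothetical sketch of the Display class above, not part of the original file; because Display uses the Singleton metaclass, the verbosity passed on first construction wins process-wide.

from ansible.utils.display import Display

display = Display(verbosity=2)
display.display("plain message")                  # always shown
display.vv("shown: verbosity (2) > caplevel (1)")
display.vvvv("suppressed at verbosity 2")
display.warning("printed once; identical warnings are deduplicated")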
| 32,856 | Python | .py | 730 | 35.183562 | 151 | 0.605497 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,113 | unsafe_proxy.py | ansible_ansible/lib/ansible/utils/unsafe_proxy.py |
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
# Original Python Recipe for Proxy:
# http://code.activestate.com/recipes/496741-object-proxying/
# Author: Tomer Filiba
from __future__ import annotations
from collections.abc import Mapping, Set
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.six import binary_type, text_type
from ansible.utils.native_jinja import NativeJinjaText
__all__ = ['AnsibleUnsafe', 'wrap_var']
class AnsibleUnsafe(object):
__UNSAFE__ = True
class AnsibleUnsafeBytes(binary_type, AnsibleUnsafe):
def decode(self, *args, **kwargs):
"""Wrapper method to ensure type conversions maintain unsafe context"""
return AnsibleUnsafeText(super(AnsibleUnsafeBytes, self).decode(*args, **kwargs))
class AnsibleUnsafeText(text_type, AnsibleUnsafe):
def encode(self, *args, **kwargs):
"""Wrapper method to ensure type conversions maintain unsafe context"""
return AnsibleUnsafeBytes(super(AnsibleUnsafeText, self).encode(*args, **kwargs))
class NativeJinjaUnsafeText(NativeJinjaText, AnsibleUnsafeText):
pass
def _wrap_dict(v):
return dict((wrap_var(k), wrap_var(item)) for k, item in v.items())
def _wrap_sequence(v):
"""Wraps a sequence with unsafe, not meant for strings, primarily
``tuple`` and ``list``
"""
v_type = type(v)
return v_type(wrap_var(item) for item in v)
def _wrap_set(v):
return set(wrap_var(item) for item in v)
def wrap_var(v):
if v is None or isinstance(v, AnsibleUnsafe):
return v
if isinstance(v, Mapping):
v = _wrap_dict(v)
elif isinstance(v, Set):
v = _wrap_set(v)
elif is_sequence(v):
v = _wrap_sequence(v)
elif isinstance(v, NativeJinjaText):
v = NativeJinjaUnsafeText(v)
elif isinstance(v, binary_type):
v = AnsibleUnsafeBytes(v)
elif isinstance(v, text_type):
v = AnsibleUnsafeText(v)
return v
def to_unsafe_bytes(*args, **kwargs):
return wrap_var(to_bytes(*args, **kwargs))
def to_unsafe_text(*args, **kwargs):
return wrap_var(to_text(*args, **kwargs))
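Editor's note: a hypothetical sketch of wrap_var from the module above, not part of the original file; it taints values recursively so the templating layer refuses to re-evaluate them.

from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var

tainted = wrap_var({"cmd": "{{ lookup('pipe', 'id') }}", "args": ["a", "b"]})
assert isinstance(tainted["cmd"], AnsibleUnsafe)            # text is wrapped
assert isinstance(tainted["args"][0], AnsibleUnsafe)        # nested items too
assert isinstance(tainted["cmd"].encode(), AnsibleUnsafe)   # conversions stay unsafe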
| 4,694 | Python | .py | 100 | 43.95 | 89 | 0.745566 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,114 | py3compat.py | ansible_ansible/lib/ansible/utils/py3compat.py |
# -*- coding: utf-8 -*-
#
# (c) 2018, Toshio Kuratomi <a.badger@gmail.com>
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import inspect
import os
from ansible.utils.display import Display
display = Display()
def __getattr__(name):
if name != 'environ':
raise AttributeError(name)
caller = inspect.stack()[1]
display.deprecated(
(
'ansible.utils.py3compat.environ is deprecated in favor of os.environ. '
f'Accessed by {caller.filename} line number {caller.lineno}'
),
version='2.20',
)
return os.environ
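Editor's note: a hypothetical sketch of the PEP 562 module-level __getattr__ shim above, not part of the original file; accessing the deprecated attribute emits the warning and falls through to os.environ.

import os
from ansible.utils import py3compat

env = py3compat.environ   # triggers the display.deprecated(...) call above
assert env is os.environ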
| 720 | Python | .py | 22 | 27.818182 | 92 | 0.677326 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,115 | __init__.py | ansible_ansible/lib/ansible/utils/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
| 749 | Python | .py | 17 | 43 | 70 | 0.77565 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,116 | sentinel.py | ansible_ansible/lib/ansible/utils/sentinel.py |
# -*- coding: utf-8 -*-
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
# For backward compatibility
from ansible.module_utils.common.sentinel import Sentinel # pylint: disable=unused-import
| 323 | Python | .py | 6 | 52.5 | 92 | 0.774603 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,117 | color.py | ansible_ansible/lib/ansible/utils/color.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import re
import sys
from ansible import constants as C
ANSIBLE_COLOR = True
if C.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR = False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR = False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR = False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR = False
if C.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR = True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
def parsecolor(color):
"""SGR parameter string for the specified color name."""
matches = re.match(r"color(?P<color>[0-9]+)"
r"|(?P<rgb>rgb(?P<red>[0-5])(?P<green>[0-5])(?P<blue>[0-5]))"
r"|gray(?P<gray>[0-9]+)", color)
if not matches:
return C.COLOR_CODES[color]
if matches.group('color'):
return u'38;5;%d' % int(matches.group('color'))
if matches.group('rgb'):
return u'38;5;%d' % (16 + 36 * int(matches.group('red')) +
6 * int(matches.group('green')) +
int(matches.group('blue')))
if matches.group('gray'):
return u'38;5;%d' % (232 + int(matches.group('gray')))
def stringc(text, color, wrap_nonvisible_chars=False):
"""String in color."""
if ANSIBLE_COLOR:
color_code = parsecolor(color)
fmt = u"\033[%sm%s\033[0m"
if wrap_nonvisible_chars:
# This option is provided for use in cases when the
# formatting of a command line prompt is needed, such as
# `ansible-console`. As said in `readline` sources:
# readline/display.c:321
# /* Current implementation:
# \001 (^A) start non-visible characters
# \002 (^B) end non-visible characters
# all characters except \001 and \002 (following a \001) are copied to
# the returned string; all characters except those between \001 and
# \002 are assumed to be `visible'. */
fmt = u"\001\033[%sm\002%s\001\033[0m\002"
return u"\n".join([fmt % (color_code, t) for t in text.split(u'\n')])
else:
return text
def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
s = u"%s=%-4s" % (lead, str(num))
if num != 0 and ANSIBLE_COLOR and color is not None:
s = stringc(s, color)
return s
def hostcolor(host, stats, color=True):
if ANSIBLE_COLOR and color:
if stats['failures'] != 0 or stats['unreachable'] != 0:
return u"%-37s" % stringc(host, C.COLOR_ERROR)
elif stats['changed'] != 0:
return u"%-37s" % stringc(host, C.COLOR_CHANGED)
else:
return u"%-37s" % stringc(host, C.COLOR_OK)
return u"%-26s" % host
| 4,057 | Python | .py | 97 | 35.237113 | 85 | 0.630005 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,118 | singleton.py | ansible_ansible/lib/ansible/utils/singleton.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from threading import RLock
class Singleton(type):
"""Metaclass for classes that wish to implement Singleton
functionality. If an instance of the class exists, it's returned,
otherwise a single instance is instantiated and returned.
"""
def __init__(cls, name, bases, dct):
super(Singleton, cls).__init__(name, bases, dct)
cls.__instance = None
cls.__rlock = RLock()
def __call__(cls, *args, **kw):
if cls.__instance is not None:
return cls.__instance
with cls.__rlock:
if cls.__instance is None:
cls.__instance = super(Singleton, cls).__call__(*args, **kw)
return cls.__instance
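Editor's note: a hypothetical sketch of the Singleton metaclass above, not part of the original file; constructor arguments after the first instantiation are silently ignored.

from ansible.utils.singleton import Singleton

class Config(metaclass=Singleton):
    def __init__(self, value=None):
        self.value = value

a = Config(value=1)
b = Config(value=2)   # returns the existing instance; value=2 is ignored
assert a is b and b.value == 1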
| 865 | Python | .py | 20 | 36.3 | 92 | 0.640811 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,119 | ssh_functions.py | ansible_ansible/lib/ansible/utils/ssh_functions.py |
# (c) 2016, James Tanner
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import subprocess
from ansible import constants as C
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.compat.paramiko import paramiko
from ansible.utils.display import Display
display = Display()
_HAS_CONTROLPERSIST = {} # type: dict[str, bool]
def check_for_controlpersist(ssh_executable):
try:
# If we've already checked this executable
return _HAS_CONTROLPERSIST[ssh_executable]
except KeyError:
pass
b_ssh_exec = to_bytes(ssh_executable, errors='surrogate_or_strict')
has_cp = True
try:
cmd = subprocess.Popen([b_ssh_exec, '-o', 'ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if b"Bad configuration option" in err or b"Usage:" in err:
has_cp = False
except OSError:
has_cp = False
_HAS_CONTROLPERSIST[ssh_executable] = has_cp
return has_cp
def set_default_transport():
# deal with 'smart' connection .. one time ..
if C.DEFAULT_TRANSPORT == 'smart':
display.deprecated("The 'smart' option for connections is deprecated. Set the connection plugin directly instead.", version='2.20')
# see if SSH can support ControlPersist if not use paramiko
if not check_for_controlpersist('ssh') and paramiko is not None:
C.DEFAULT_TRANSPORT = "paramiko"
else:
C.DEFAULT_TRANSPORT = "ssh"
| 2,216 | Python | .py | 51 | 38.960784 | 139 | 0.721525 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,120 | helpers.py | ansible_ansible/lib/ansible/utils/helpers.py |
# (c) 2016, Ansible by Red Hat <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.module_utils.six import string_types
def pct_to_int(value, num_items, min_value=1):
"""
    Converts a percentage string such as "30%" into the integer share of
    num_items (falling back to min_value when the share rounds to zero);
    otherwise converts the given value to an integer.
"""
if isinstance(value, string_types) and value.endswith('%'):
value_pct = int(value.replace("%", ""))
return int((value_pct / 100.0) * num_items) or min_value
else:
return int(value)
def object_to_dict(obj, exclude=None):
"""
Converts an object into a dict making the properties into keys, allows excluding certain keys
"""
if exclude is None or not isinstance(exclude, list):
exclude = []
return dict((key, getattr(obj, key)) for key in dir(obj) if not (key.startswith('_') or key in exclude))
def deduplicate_list(original_list):
"""
Creates a deduplicated list with the order in which each item is first found.
"""
seen = set()
return [x for x in original_list if x not in seen and not seen.add(x)]
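Editor's note: a hypothetical sketch of the helpers above, not part of the original file; pct_to_int floors the share and falls back to min_value when the result would be zero.

from ansible.utils.helpers import deduplicate_list, pct_to_int

assert pct_to_int('30%', 20) == 6    # int(0.30 * 20)
assert pct_to_int('1%', 20) == 1     # int(0.2) == 0, so min_value applies
assert pct_to_int(5, 20) == 5        # non-percentage values pass through int()
assert deduplicate_list([3, 1, 3, 2, 1]) == [3, 1, 2]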
| 1,759 | Python | .py | 41 | 39.365854 | 108 | 0.71462 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,121 | shlex.py | ansible_ansible/lib/ansible/utils/shlex.py |
# (c) 2015, Marius Gedminas <marius@gedmin.as>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import shlex
# shlex.split() wants Unicode (i.e. ``str``) input on Python 3
shlex_split = shlex.split
| 841 | Python | .py | 20 | 40.85 | 70 | 0.76989 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,122 | context_objects.py | ansible_ansible/lib/ansible/utils/context_objects.py |
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Hold command line arguments for use in other modules
"""
from __future__ import annotations
from abc import ABCMeta
from collections.abc import Container, Mapping, Sequence, Set
from ansible.module_utils.common.collections import ImmutableDict
from ansible.module_utils.six import add_metaclass, binary_type, text_type
from ansible.utils.singleton import Singleton
def _make_immutable(obj):
"""Recursively convert a container and objects inside of it into immutable data types"""
if isinstance(obj, (text_type, binary_type)):
# Strings first because they are also sequences
return obj
elif isinstance(obj, Mapping):
temp_dict = {}
for key, value in obj.items():
if isinstance(value, Container):
temp_dict[key] = _make_immutable(value)
else:
temp_dict[key] = value
return ImmutableDict(temp_dict)
elif isinstance(obj, Set):
temp_set = set()
for value in obj:
if isinstance(value, Container):
temp_set.add(_make_immutable(value))
else:
temp_set.add(value)
return frozenset(temp_set)
elif isinstance(obj, Sequence):
temp_sequence = []
for value in obj:
if isinstance(value, Container):
temp_sequence.append(_make_immutable(value))
else:
temp_sequence.append(value)
return tuple(temp_sequence)
return obj
class _ABCSingleton(Singleton, ABCMeta):
"""
Combine ABCMeta based classes with Singleton based classes
Combine Singleton and ABCMeta so we have a metaclass that unambiguously knows which can override
the other. Useful for making new types of containers which are also Singletons.
"""
pass
class CLIArgs(ImmutableDict):
"""
Hold a parsed copy of cli arguments
We have both this non-Singleton version and the Singleton, GlobalCLIArgs, version to leave us
room to implement a Context object in the future. Whereas there should only be one set of args
in a global context, individual Context objects might want to pretend that they have different
command line switches to trigger different behaviour when they run. So if we support Contexts
in the future, they would use CLIArgs instead of GlobalCLIArgs to store their version of command
line flags.
"""
def __init__(self, mapping):
toplevel = {}
for key, value in mapping.items():
toplevel[key] = _make_immutable(value)
super(CLIArgs, self).__init__(toplevel)
@classmethod
def from_options(cls, options):
return cls(vars(options))
@add_metaclass(_ABCSingleton)
class GlobalCLIArgs(CLIArgs):
"""
Globally hold a parsed copy of cli arguments.
Only one of these exist per program as it is for global context
"""
pass
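Editor's note: a hypothetical sketch of CLIArgs/GlobalCLIArgs above, not part of the original file; mutable containers are recursively frozen, and the Global variant is a process-wide singleton.

from argparse import Namespace
from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs

args = CLIArgs.from_options(Namespace(check=True, tags=['a', 'b']))
assert args['check'] is True
assert args['tags'] == ('a', 'b')                          # list became a tuple
assert GlobalCLIArgs({'x': 1}) is GlobalCLIArgs({'y': 2})  # singleton semantics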
| 3,034 | Python | .py | 73 | 34.643836 | 100 | 0.686927 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,123 | version.py | ansible_ansible/lib/ansible/utils/version.py |
# Copyright (c) 2020 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import re
from ansible.module_utils.compat.version import LooseVersion, Version
# Regular expression taken from
# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
SEMVER_RE = re.compile(
r"""
^
(?P<major>0|[1-9]\d*)
\.
(?P<minor>0|[1-9]\d*)
\.
(?P<patch>0|[1-9]\d*)
(?:
-
(?P<prerelease>
(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)
(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*
)
)?
(?:
\+
(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*)
)?
$
""",
flags=re.X
)
class _Alpha:
"""Class to easily allow comparing strings
    Largely this exists so that comparing an integer and a string on py3
    behaves the way it did on py2.
"""
def __init__(self, specifier):
self.specifier = specifier
def __repr__(self):
return repr(self.specifier)
def __eq__(self, other):
if isinstance(other, _Alpha):
return self.specifier == other.specifier
elif isinstance(other, str):
return self.specifier == other
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, _Alpha):
return self.specifier < other.specifier
elif isinstance(other, str):
return self.specifier < other
elif isinstance(other, _Numeric):
return False
raise ValueError
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
class _Numeric:
"""Class to easily allow comparing numbers
    Largely this exists so that comparing an integer and a string on py3
    behaves the way it did on py2.
"""
def __init__(self, specifier):
self.specifier = int(specifier)
def __repr__(self):
return repr(self.specifier)
def __eq__(self, other):
if isinstance(other, _Numeric):
return self.specifier == other.specifier
elif isinstance(other, int):
return self.specifier == other
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, _Numeric):
return self.specifier < other.specifier
elif isinstance(other, int):
return self.specifier < other
elif isinstance(other, _Alpha):
return True
raise ValueError
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
class SemanticVersion(Version):
"""Version comparison class that implements Semantic Versioning 2.0.0
Based off of ``distutils.version.Version``
"""
version_re = SEMVER_RE
def __init__(self, vstring=None):
self.vstring = vstring
self.major = None
self.minor = None
self.patch = None
self.prerelease = ()
self.buildmetadata = ()
if vstring:
self.parse(vstring)
def __repr__(self):
return 'SemanticVersion(%r)' % self.vstring
@staticmethod
def from_loose_version(loose_version):
"""This method is designed to take a ``LooseVersion``
and attempt to construct a ``SemanticVersion`` from it
This is useful where you want to do simple version math
without requiring users to provide a compliant semver.
"""
if not isinstance(loose_version, LooseVersion):
raise ValueError("%r is not a LooseVersion" % loose_version)
try:
version = loose_version.version[:]
except AttributeError:
raise ValueError("%r is not a LooseVersion" % loose_version)
extra_idx = 3
for marker in ('-', '+'):
try:
idx = version.index(marker)
except ValueError:
continue
else:
if idx < extra_idx:
extra_idx = idx
version = version[:extra_idx]
if version and set(type(v) for v in version) != set((int,)):
raise ValueError("Non integer values in %r" % loose_version)
# Extra is everything to the right of the core version
extra = re.search('[+-].+$', loose_version.vstring)
version = version + [0] * (3 - len(version))
return SemanticVersion(
'%s%s' % (
'.'.join(str(v) for v in version),
extra.group(0) if extra else ''
)
)
def parse(self, vstring):
match = SEMVER_RE.match(vstring)
if not match:
raise ValueError("invalid semantic version '%s'" % vstring)
(major, minor, patch, prerelease, buildmetadata) = match.group(1, 2, 3, 4, 5)
self.vstring = vstring
self.major = int(major)
self.minor = int(minor)
self.patch = int(patch)
if prerelease:
self.prerelease = tuple(_Numeric(x) if x.isdigit() else _Alpha(x) for x in prerelease.split('.'))
if buildmetadata:
self.buildmetadata = tuple(_Numeric(x) if x.isdigit() else _Alpha(x) for x in buildmetadata.split('.'))
@property
def core(self):
return self.major, self.minor, self.patch
@property
def is_prerelease(self):
return bool(self.prerelease)
@property
def is_stable(self):
# Major version zero (0.y.z) is for initial development. Anything MAY change at any time.
# The public API SHOULD NOT be considered stable.
# https://semver.org/#spec-item-4
return not (self.major == 0 or self.is_prerelease)
def _cmp(self, other):
if isinstance(other, str):
other = SemanticVersion(other)
if self.core != other.core:
# if the core version doesn't match
# prerelease and buildmetadata doesn't matter
if self.core < other.core:
return -1
else:
return 1
if not any((self.prerelease, other.prerelease)):
return 0
if self.prerelease and not other.prerelease:
return -1
elif not self.prerelease and other.prerelease:
return 1
else:
if self.prerelease < other.prerelease:
return -1
elif self.prerelease > other.prerelease:
return 1
# Build metadata MUST be ignored when determining version precedence
# https://semver.org/#spec-item-10
# With the above in mind it is ignored here
# If we have made it here, things should be equal
return 0
# The Py2 and Py3 implementations of distutils.version.Version
# are quite different, this makes the Py2 and Py3 implementations
# the same
def __eq__(self, other):
return self._cmp(other) == 0
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self._cmp(other) < 0
def __le__(self, other):
return self._cmp(other) <= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __ge__(self, other):
return self._cmp(other) >= 0
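Editor's note: a hypothetical sketch of SemanticVersion above, not part of the original file; it exercises the SemVer precedence rules (prerelease ordering, ignored build metadata, 0.y.z instability).

from ansible.module_utils.compat.version import LooseVersion
from ansible.utils.version import SemanticVersion

assert SemanticVersion('1.2.3') > SemanticVersion('1.2.3-rc.1')
assert SemanticVersion('1.0.0-alpha.2') < SemanticVersion('1.0.0-alpha.10')
assert SemanticVersion('1.2.3+build.5') == SemanticVersion('1.2.3')  # metadata ignored
assert SemanticVersion.from_loose_version(LooseVersion('1.2')) == SemanticVersion('1.2.0')
assert not SemanticVersion('0.9.1').is_stable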
| 7,736 | Python | .py | 208 | 28.1875 | 115 | 0.579215 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,124 | cmd_functions.py | ansible_ansible/lib/ansible/utils/cmd_functions.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import select
import shlex
import subprocess
import sys
from ansible.module_utils.common.text.converters import to_bytes
def run_cmd(cmd, live=False, readsize=10):
cmdargs = shlex.split(cmd)
# subprocess should be passed byte strings.
cmdargs = [to_bytes(a, errors='surrogate_or_strict') for a in cmdargs]
p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = b''
stderr = b''
rpipes = [p.stdout, p.stderr]
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), readsize)
if live:
sys.stdout.buffer.write(dat)
stdout += dat
if dat == b'':
rpipes.remove(p.stdout)
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), readsize)
stderr += dat
if live:
sys.stdout.buffer.write(dat)
if dat == b'':
rpipes.remove(p.stderr)
# only break out if we've emptied the pipes, or there is nothing to
# read from and the process has finished.
if (not rpipes or not rfd) and p.poll() is not None:
break
# Calling wait while there are still pipes to read can cause a lock
elif not rpipes and p.poll() is None:
p.wait()
return p.returncode, stdout, stderr
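Editor's note: a hypothetical sketch of run_cmd above, not part of the original file; stdout/stderr are captured as bytes, and live=True additionally echoes both streams to the local stdout (POSIX assumed).

from ansible.utils.cmd_functions import run_cmd

rc, out, err = run_cmd('echo hello')
assert rc == 0 and out == b'hello\n'
rc, out, err = run_cmd('ls /nonexistent')
assert rc != 0 and err    # diagnostics land on stderr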
| 2,180 | Python | .py | 55 | 33.290909 | 81 | 0.663357 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,125 | native_jinja.py | ansible_ansible/lib/ansible/utils/native_jinja.py |
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.module_utils.six import text_type
class NativeJinjaText(text_type):
pass
| 262 | Python | .py | 6 | 41.166667 | 92 | 0.772908 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,126 | multiprocessing.py | ansible_ansible/lib/ansible/utils/multiprocessing.py |
# Copyright (c) 2019 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import multiprocessing
# Explicit multiprocessing context using the fork start method
# This exists as a compat layer now that Python3.8 has changed the default
# start method for macOS to ``spawn`` which is incompatible with our
# code base currently
#
# This exists in utils to allow it to be easily imported into various places
# without causing circular import or dependency problems
context = multiprocessing.get_context('fork')
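Editor's note: a hypothetical sketch of the fork context above, not part of the original file; it is used exactly like the top-level multiprocessing module, but always with the fork start method.

from ansible.utils.multiprocessing import context

def work(q):
    q.put('hello from a forked child')

if __name__ == '__main__':
    q = context.Queue()
    p = context.Process(target=work, args=(q,))
    p.start()
    print(q.get())
    p.join()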
| 614 | Python | .py | 12 | 49.916667 | 92 | 0.796327 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,127 | _collection_config.py | ansible_ansible/lib/ansible/utils/collection_loader/_collection_config.py |
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# CAUTION: This implementation of the collection loader is used by ansible-test.
# Because of this, it must be compatible with all Python versions supported on the controller or remote.
from __future__ import annotations
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import add_metaclass
class _EventSource:
def __init__(self):
self._handlers = set()
def __iadd__(self, handler):
if not callable(handler):
raise ValueError('handler must be callable')
self._handlers.add(handler)
return self
def __isub__(self, handler):
try:
self._handlers.remove(handler)
except KeyError:
pass
return self
def _on_exception(self, handler, exc, *args, **kwargs):
# if we return True, we want the caller to re-raise
return True
def fire(self, *args, **kwargs):
for h in self._handlers:
try:
h(*args, **kwargs)
except Exception as ex:
if self._on_exception(h, ex, *args, **kwargs):
raise
class _AnsibleCollectionConfig(type):
def __init__(cls, meta, name, bases):
cls._collection_finder = None
cls._default_collection = None
cls._on_collection_load = _EventSource()
@property
def collection_finder(cls):
return cls._collection_finder
@collection_finder.setter
def collection_finder(cls, value):
if cls._collection_finder:
raise ValueError('an AnsibleCollectionFinder has already been configured')
cls._collection_finder = value
@property
def collection_paths(cls):
cls._require_finder()
return [to_text(p) for p in cls._collection_finder._n_collection_paths]
@property
def default_collection(cls):
return cls._default_collection
@default_collection.setter
def default_collection(cls, value):
cls._default_collection = value
@property
def on_collection_load(cls):
return cls._on_collection_load
@on_collection_load.setter
def on_collection_load(cls, value):
if value is not cls._on_collection_load:
raise ValueError('on_collection_load is not directly settable (use +=)')
@property
def playbook_paths(cls):
cls._require_finder()
return [to_text(p) for p in cls._collection_finder._n_playbook_paths]
@playbook_paths.setter
def playbook_paths(cls, value):
cls._require_finder()
cls._collection_finder.set_playbook_paths(value)
def _require_finder(cls):
if not cls._collection_finder:
raise NotImplementedError('an AnsibleCollectionFinder has not been installed in this process')
# concrete class of our metaclass type that defines the class properties we want
@add_metaclass(_AnsibleCollectionConfig)
class AnsibleCollectionConfig(object):
pass
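# Editorial sketch (not part of the original file): because the properties live
# on the metaclass, they are read and assigned on the class itself, never on an
# instance.
if __name__ == '__main__':
    AnsibleCollectionConfig.default_collection = 'ns.coll'
    assert AnsibleCollectionConfig.default_collection == 'ns.coll'
    AnsibleCollectionConfig.on_collection_load += lambda **kw: None  # += only; direct assignment raises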
| 3,094 | Python | .py | 76 | 33.210526 | 113 | 0.668115 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,128 | _collection_meta.py | ansible_ansible/lib/ansible/utils/collection_loader/_collection_meta.py |
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# CAUTION: This implementation of the collection loader is used by ansible-test.
# Because of this, it must be compatible with all Python versions supported on the controller or remote.
from __future__ import annotations
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping # type: ignore[no-redef,attr-defined] # pylint: disable=ansible-bad-import-from
from ansible.module_utils.common.yaml import yaml_load
def _meta_yml_to_dict(yaml_string_data, content_id):
"""
    Converts a YAML dictionary string to a Python dictionary. This function may be monkeypatched with another
    implementation by some tools (eg the import sanity test).
:param yaml_string_data: a bytes-ish YAML dictionary
:param content_id: a unique ID representing the content to allow other implementations to cache the output
:return: a Python dictionary representing the YAML dictionary content
"""
# NB: content_id is passed in, but not used by this implementation
routing_dict = yaml_load(yaml_string_data)
if not routing_dict:
routing_dict = {}
if not isinstance(routing_dict, Mapping):
raise ValueError('collection metadata must be an instance of Python Mapping')
return routing_dict
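# Editorial sketch (not part of the original file): a YAML mapping parses to a
# plain dict, while a non-mapping document raises ValueError.
if __name__ == '__main__':
    meta = _meta_yml_to_dict(b'plugin_routing: {}', ('ns.coll', 'runtime.yml'))
    assert meta == {'plugin_routing': {}}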
| 1,398 | Python | .py | 25 | 51.8 | 120 | 0.756401 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,129 | _collection_finder.py | ansible_ansible/lib/ansible/utils/collection_loader/_collection_finder.py |
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# CAUTION: This implementation of the collection loader is used by ansible-test.
# Because of this, it must be compatible with all Python versions supported on the controller or remote.
from __future__ import annotations
import itertools
import os
import os.path
import re
import sys
from keyword import iskeyword
# DO NOT add new non-stdlib import deps here, this loader is used by external tools (eg ansible-test import sanity)
# that only allow stdlib and module_utils
from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
from ._collection_config import AnsibleCollectionConfig
from contextlib import contextmanager
from types import ModuleType
try:
from importlib import import_module
except ImportError:
def import_module(name): # type: ignore[misc]
__import__(name)
return sys.modules[name]
from importlib import reload as reload_module
try:
try:
# Available on Python >= 3.11
# We ignore the import error that will trigger when running mypy with
# older Python versions.
from importlib.resources.abc import TraversableResources # type: ignore[import]
except ImportError:
# Used with Python 3.9 and 3.10 only
# This member is still available as an alias up until Python 3.14 but
# is deprecated as of Python 3.12.
from importlib.abc import TraversableResources # deprecated: description='TraversableResources move' python_version='3.10'
except ImportError:
# Python < 3.9
# deprecated: description='TraversableResources fallback' python_version='3.8'
TraversableResources = object # type: ignore[assignment,misc]
try:
from importlib.util import find_spec, spec_from_loader
except ImportError:
pass
try:
from importlib.machinery import FileFinder
except ImportError:
HAS_FILE_FINDER = False
else:
HAS_FILE_FINDER = True
try:
import pathlib
except ImportError:
pass
# NB: this supports import sanity test providing a different impl
try:
from ._collection_meta import _meta_yml_to_dict
except ImportError:
_meta_yml_to_dict = None
is_python_identifier = str.isidentifier # type: ignore[attr-defined]
PB_EXTENSIONS = ('.yml', '.yaml')
SYNTHETIC_PACKAGE_NAME = '<ansible_synthetic_collection_package>'
class _AnsibleNSTraversable:
"""Class that implements the ``importlib.resources.abc.Traversable``
interface for the following ``ansible_collections`` namespace packages::
* ``ansible_collections``
* ``ansible_collections.<namespace>``
These namespace packages operate differently from a normal Python
namespace package, in that the same namespace can be distributed across
multiple directories on the filesystem and still function as a single
namespace, such as::
* ``/usr/share/ansible/collections/ansible_collections/ansible/posix/``
* ``/home/user/.ansible/collections/ansible_collections/ansible/windows/``
This class will mimic the behavior of various ``pathlib.Path`` methods,
by combining the results of multiple root paths into the output.
This class does not do anything to remove duplicate collections from the
list, so when traversing either namespace patterns supported by this class,
it is possible to have the same collection located in multiple root paths,
but precedence rules only use one. When iterating or traversing these
package roots, there is the potential to see the same collection in
multiple places without indication of which would be used. In such a
circumstance, it is best to then call ``importlib.resources.files`` for an
individual collection package rather than continuing to traverse from the
namespace package.
Several methods will raise ``NotImplementedError`` as they do not make
sense for these namespace packages.
"""
def __init__(self, *paths):
self._paths = [pathlib.Path(p) for p in paths]
def __repr__(self):
return "_AnsibleNSTraversable('%s')" % "', '".join(map(to_text, self._paths))
def iterdir(self):
return itertools.chain.from_iterable(p.iterdir() for p in self._paths if p.is_dir())
def is_dir(self):
return any(p.is_dir() for p in self._paths)
def is_file(self):
return False
def glob(self, pattern):
return itertools.chain.from_iterable(p.glob(pattern) for p in self._paths if p.is_dir())
def _not_implemented(self, *args, **kwargs):
raise NotImplementedError('not usable on namespaces')
joinpath = __truediv__ = read_bytes = read_text = _not_implemented
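# Editorial sketch (not part of the original source): iterating a namespace
# spread across two roots merges both directory listings, e.g.:
#
#     ns = _AnsibleNSTraversable('/usr/share/ansible/collections/ansible_collections',
#                                '/home/user/.ansible/collections/ansible_collections')
#     names = sorted(p.name for p in ns.iterdir())   # entries from both roots
#     ns.read_text()                                 # raises NotImplementedError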
class _AnsibleTraversableResources(TraversableResources):
"""Implements ``importlib.resources.abc.TraversableResources`` for the
collection Python loaders.
The result of ``files`` will depend on whether a particular collection, or
a sub package of a collection was referenced, as opposed to
``ansible_collections`` or a particular namespace. For a collection and
its subpackages, a ``pathlib.Path`` instance will be returned, whereas
for the higher level namespace packages, ``_AnsibleNSTraversable``
will be returned.
"""
def __init__(self, package, loader):
self._package = package
self._loader = loader
def _get_name(self, package):
try:
# spec
return package.name
except AttributeError:
# module
return package.__name__
def _get_package(self, package):
try:
# spec
return package.__parent__
except AttributeError:
# module
return package.__package__
def _get_path(self, package):
try:
# spec
return package.origin
except AttributeError:
# module
return package.__file__
def _is_ansible_ns_package(self, package):
origin = getattr(package, 'origin', None)
if not origin:
return False
if origin == SYNTHETIC_PACKAGE_NAME:
return True
module_filename = os.path.basename(origin)
return module_filename in {'__synthetic__', '__init__.py'}
def _ensure_package(self, package):
if self._is_ansible_ns_package(package):
# Short circuit our loaders
return
if self._get_package(package) != package.__name__:
raise TypeError('%r is not a package' % package.__name__)
def files(self):
package = self._package
parts = package.split('.')
is_ns = parts[0] == 'ansible_collections' and len(parts) < 3
if isinstance(package, str):
if is_ns:
# Don't use ``spec_from_loader`` here, because that will point
# to exactly 1 location for a namespace. Use ``find_spec``
# to get a list of all locations for the namespace
package = find_spec(package)
else:
package = spec_from_loader(package, self._loader)
elif not isinstance(package, ModuleType):
raise TypeError('Expected string or module, got %r' % package.__class__.__name__)
self._ensure_package(package)
if is_ns:
return _AnsibleNSTraversable(*package.submodule_search_locations)
return pathlib.Path(self._get_path(package)).parent
class _AnsibleCollectionFinder:
def __init__(self, paths=None, scan_sys_paths=True):
# TODO: accept metadata loader override
self._ansible_pkg_path = to_native(os.path.dirname(to_bytes(sys.modules['ansible'].__file__)))
if isinstance(paths, str):
paths = [paths]
elif paths is None:
paths = []
# expand any placeholders in configured paths
paths = [os.path.expanduser(to_native(p, errors='surrogate_or_strict')) for p in paths]
# add syspaths if needed
if scan_sys_paths:
paths.extend(sys.path)
good_paths = []
# expand any placeholders in configured paths
for p in paths:
# ensure we always have ansible_collections
if os.path.basename(p) == 'ansible_collections':
p = os.path.dirname(p)
if p not in good_paths and os.path.isdir(to_bytes(os.path.join(p, 'ansible_collections'), errors='surrogate_or_strict')):
good_paths.append(p)
self._n_configured_paths = good_paths
self._n_cached_collection_paths = None
self._n_cached_collection_qualified_paths = None
self._n_playbook_paths = []
@classmethod
def _remove(cls):
for mps in sys.meta_path:
if isinstance(mps, _AnsibleCollectionFinder):
sys.meta_path.remove(mps)
# remove any path hooks that look like ours
for ph in sys.path_hooks:
if hasattr(ph, '__self__') and isinstance(ph.__self__, _AnsibleCollectionFinder):
sys.path_hooks.remove(ph)
# zap any cached path importer cache entries that might refer to us
sys.path_importer_cache.clear()
AnsibleCollectionConfig._collection_finder = None
# validate via the public property that we really killed it
if AnsibleCollectionConfig.collection_finder is not None:
raise AssertionError('_AnsibleCollectionFinder remove did not reset AnsibleCollectionConfig.collection_finder')
def _install(self):
self._remove()
sys.meta_path.insert(0, self)
sys.path_hooks.insert(0, self._ansible_collection_path_hook)
AnsibleCollectionConfig.collection_finder = self
def _ansible_collection_path_hook(self, path):
path = to_native(path)
interesting_paths = self._n_cached_collection_qualified_paths
if not interesting_paths:
interesting_paths = []
for p in self._n_collection_paths:
if os.path.basename(p) != 'ansible_collections':
p = os.path.join(p, 'ansible_collections')
if p not in interesting_paths:
interesting_paths.append(p)
interesting_paths.insert(0, self._ansible_pkg_path)
self._n_cached_collection_qualified_paths = interesting_paths
if any(path.startswith(p) for p in interesting_paths):
return _AnsiblePathHookFinder(self, path)
raise ImportError('not interested')
@property
def _n_collection_paths(self):
paths = self._n_cached_collection_paths
if not paths:
self._n_cached_collection_paths = paths = self._n_playbook_paths + self._n_configured_paths
return paths
def set_playbook_paths(self, playbook_paths):
if isinstance(playbook_paths, str):
playbook_paths = [playbook_paths]
# track visited paths; we have to preserve the dir order as-passed in case there are duplicate collections (first one wins)
added_paths = set()
# de-dupe
self._n_playbook_paths = [os.path.join(to_native(p), 'collections') for p in playbook_paths if not (p in added_paths or added_paths.add(p))]
self._n_cached_collection_paths = None
# HACK: playbook CLI sets this relatively late, so we've already loaded some packages whose paths might depend on this. Fix those up.
# NB: this should NOT be used for late additions; ideally we'd fix the playbook dir setup earlier in Ansible init
# to prevent this from occurring
for pkg in ['ansible_collections', 'ansible_collections.ansible']:
self._reload_hack(pkg)
def _reload_hack(self, fullname):
m = sys.modules.get(fullname)
if not m:
return
reload_module(m)
def _get_loader(self, fullname, path=None):
split_name = fullname.split('.')
toplevel_pkg = split_name[0]
module_to_find = split_name[-1]
part_count = len(split_name)
if toplevel_pkg not in ['ansible', 'ansible_collections']:
# not interested in anything other than ansible_collections (and limited cases under ansible)
return None
# sanity check what we're getting from import, canonicalize path values
if part_count == 1:
if path:
raise ValueError('path should not be specified for top-level packages (trying to find {0})'.format(fullname))
else:
# seed the path to the configured collection roots
path = self._n_collection_paths
if part_count > 1 and path is None:
raise ValueError('path must be specified for subpackages (trying to find {0})'.format(fullname))
if toplevel_pkg == 'ansible':
# something under the ansible package, delegate to our internal loader in case of redirections
initialize_loader = _AnsibleInternalRedirectLoader
elif part_count == 1:
initialize_loader = _AnsibleCollectionRootPkgLoader
elif part_count == 2: # ns pkg eg, ansible_collections, ansible_collections.somens
initialize_loader = _AnsibleCollectionNSPkgLoader
elif part_count == 3: # collection pkg eg, ansible_collections.somens.somecoll
initialize_loader = _AnsibleCollectionPkgLoader
else:
# anything below the collection
initialize_loader = _AnsibleCollectionLoader
# NB: actual "find"ing is delegated to the constructors on the various loaders; they'll ImportError if not found
try:
return initialize_loader(fullname=fullname, path_list=path)
except ImportError:
# TODO: log attempt to load context
return None
def find_module(self, fullname, path=None):
# Figure out what's being asked for, and delegate to a special-purpose loader
return self._get_loader(fullname, path)
def find_spec(self, fullname, path, target=None):
loader = self._get_loader(fullname, path)
if loader is None:
return None
spec = spec_from_loader(fullname, loader)
if spec is not None and hasattr(loader, '_subpackage_search_paths'):
spec.submodule_search_locations = loader._subpackage_search_paths
return spec
# Implements a path_hook finder for iter_modules (since it's only path based). This finder does not need to actually
# function as a finder in most cases, since our meta_path finder is consulted first for *almost* everything, except
# pkgutil.iter_modules, and under py2, pkgutil.get_data if the parent package passed has not been loaded yet.
class _AnsiblePathHookFinder:
def __init__(self, collection_finder, pathctx):
# when called from a path_hook, find_module doesn't usually get the path arg, so this provides our context
self._pathctx = to_native(pathctx)
self._collection_finder = collection_finder
# cache the native FileFinder (take advantage of its filesystem cache for future find/load requests)
self._file_finder = None
# class init is fun- this method has a self arg that won't get used
def _get_filefinder_path_hook(self=None):
_file_finder_hook = None
# try to find the FileFinder hook to call for fallback path-based imports in Py3
_file_finder_hook = [ph for ph in sys.path_hooks if 'FileFinder' in repr(ph)]
if len(_file_finder_hook) != 1:
raise Exception('need exactly one FileFinder import hook (found {0})'.format(len(_file_finder_hook)))
_file_finder_hook = _file_finder_hook[0]
return _file_finder_hook
_filefinder_path_hook = _get_filefinder_path_hook()
def _get_finder(self, fullname):
split_name = fullname.split('.')
toplevel_pkg = split_name[0]
if toplevel_pkg == 'ansible_collections':
# collections content? delegate to the collection finder
return self._collection_finder
else:
# Something else; we'd normally restrict this to `ansible` descendent modules so that any weird loader
# behavior that arbitrary Python modules have can be serviced by those loaders. In some dev/test
# scenarios (eg a venv under a collection) our path_hook signs us up to load non-Ansible things, and
# it's too late by the time we've reached this point, but also too expensive for the path_hook to figure
# out what we *shouldn't* be loading with the limited info it has. So we'll just delegate to the
# normal path-based loader as best we can to service it. This also allows us to take advantage of Python's
# built-in FS caching and byte-compilation for most things.
# create or consult our cached file finder for this path
if not self._file_finder:
try:
self._file_finder = _AnsiblePathHookFinder._filefinder_path_hook(self._pathctx)
except ImportError:
# FUTURE: log at a high logging level? This is normal for things like python36.zip on the path, but
# might not be in some other situation...
return None
return self._file_finder
def find_module(self, fullname, path=None):
# we ignore the passed in path here- use what we got from the path hook init
finder = self._get_finder(fullname)
if finder is None:
return None
elif HAS_FILE_FINDER and isinstance(finder, FileFinder):
# this codepath is erroneously used under some cases in py3,
# and the find_module method on FileFinder does not accept the path arg
# see https://github.com/pypa/setuptools/pull/2918
return finder.find_module(fullname)
else:
return finder.find_module(fullname, path=[self._pathctx])
def find_spec(self, fullname, target=None):
split_name = fullname.split('.')
toplevel_pkg = split_name[0]
finder = self._get_finder(fullname)
if finder is None:
return None
elif toplevel_pkg == 'ansible_collections':
return finder.find_spec(fullname, path=[self._pathctx])
else:
return finder.find_spec(fullname)
def iter_modules(self, prefix):
# NB: this currently represents only what's on disk, and does not handle package redirection
return _iter_modules_impl([self._pathctx], prefix)
def __repr__(self):
return "{0}(path='{1}')".format(self.__class__.__name__, self._pathctx)
class _AnsibleCollectionPkgLoaderBase:
_allows_package_code = False
def __init__(self, fullname, path_list=None):
self._fullname = fullname
self._redirect_module = None
self._split_name = fullname.split('.')
self._rpart_name = fullname.rpartition('.')
self._parent_package_name = self._rpart_name[0] # eg ansible_collections for ansible_collections.somens, '' for toplevel
self._package_to_load = self._rpart_name[2] # eg somens for ansible_collections.somens
self._source_code_path = None
self._decoded_source = None
self._compiled_code = None
self._validate_args()
self._candidate_paths = self._get_candidate_paths([to_native(p) for p in path_list])
self._subpackage_search_paths = self._get_subpackage_search_paths(self._candidate_paths)
self._validate_final()
# allow subclasses to validate args and sniff split values before we start digging around
def _validate_args(self):
if self._split_name[0] != 'ansible_collections':
raise ImportError('this loader can only load packages from the ansible_collections package, not {0}'.format(self._fullname))
# allow subclasses to customize candidate path filtering
def _get_candidate_paths(self, path_list):
return [os.path.join(p, self._package_to_load) for p in path_list]
# allow subclasses to customize finding paths
def _get_subpackage_search_paths(self, candidate_paths):
# filter candidate paths for existence (NB: silently ignoring package init code and same-named modules)
return [p for p in candidate_paths if os.path.isdir(to_bytes(p))]
# allow subclasses to customize state validation/manipulation before we return the loader instance
def _validate_final(self):
return
@staticmethod
@contextmanager
def _new_or_existing_module(name, **kwargs):
# handle all-or-nothing sys.modules creation/use-existing/delete-on-exception-if-created behavior
created_module = False
module = sys.modules.get(name)
try:
if not module:
module = ModuleType(name)
created_module = True
sys.modules[name] = module
# always override the values passed, except name (allow reference aliasing)
for attr, value in kwargs.items():
setattr(module, attr, value)
yield module
except Exception:
if created_module:
if sys.modules.get(name):
sys.modules.pop(name)
raise
# basic module/package location support
# NB: this does not support distributed packages!
@staticmethod
def _module_file_from_path(leaf_name, path):
has_code = True
package_path = os.path.join(to_native(path), to_native(leaf_name))
module_path = None
# if the submodule is a package, assemble valid submodule paths, but stop looking for a module
if os.path.isdir(to_bytes(package_path)):
# is there a package init?
module_path = os.path.join(package_path, '__init__.py')
if not os.path.isfile(to_bytes(module_path)):
module_path = os.path.join(package_path, '__synthetic__')
has_code = False
else:
module_path = package_path + '.py'
package_path = None
if not os.path.isfile(to_bytes(module_path)):
raise ImportError('{0} not found at {1}'.format(leaf_name, path))
return module_path, has_code, package_path
def get_resource_reader(self, fullname):
return _AnsibleTraversableResources(fullname, self)
def exec_module(self, module):
# short-circuit redirect; avoid reinitializing existing modules
if self._redirect_module:
return
# execute the module's code in its namespace
code_obj = self.get_code(self._fullname)
if code_obj is not None: # things like NS packages that can't have code on disk will return None
exec(code_obj, module.__dict__)
def create_module(self, spec):
# short-circuit redirect; we've already imported the redirected module, so just alias it and return it
if self._redirect_module:
return self._redirect_module
else:
return None
def load_module(self, fullname):
# short-circuit redirect; we've already imported the redirected module, so just alias it and return it
if self._redirect_module:
sys.modules[self._fullname] = self._redirect_module
return self._redirect_module
# we're actually loading a module/package
module_attrs = dict(
__loader__=self,
__file__=self.get_filename(fullname),
__package__=self._parent_package_name # sane default for non-packages
)
# eg, I am a package
if self._subpackage_search_paths is not None: # empty is legal
module_attrs['__path__'] = self._subpackage_search_paths
module_attrs['__package__'] = fullname # per PEP366
with self._new_or_existing_module(fullname, **module_attrs) as module:
# execute the module's code in its namespace
code_obj = self.get_code(fullname)
if code_obj is not None: # things like NS packages that can't have code on disk will return None
exec(code_obj, module.__dict__)
return module
def is_package(self, fullname):
if fullname != self._fullname:
raise ValueError('this loader cannot answer is_package for {0}, only {1}'.format(fullname, self._fullname))
return self._subpackage_search_paths is not None
def get_source(self, fullname):
if self._decoded_source:
return self._decoded_source
if fullname != self._fullname:
raise ValueError('this loader cannot load source for {0}, only {1}'.format(fullname, self._fullname))
if not self._source_code_path:
return None
# FIXME: what do we want encoding/newline requirements to be?
self._decoded_source = self.get_data(self._source_code_path)
return self._decoded_source
def get_data(self, path):
if not path:
raise ValueError('a path must be specified')
# TODO: ensure we're being asked for a path below something we own
# TODO: try to handle redirects internally?
if not path[0] == '/':
# relative to current package, search package paths if possible (this may not be necessary)
# candidate_paths = [os.path.join(ssp, path) for ssp in self._subpackage_search_paths]
raise ValueError('relative resource paths not supported')
else:
candidate_paths = [path]
for p in candidate_paths:
b_path = to_bytes(p)
if os.path.isfile(b_path):
with open(b_path, 'rb') as fd:
return fd.read()
            # HACK: if caller asks for __init__.py and the parent dir exists, return an empty string (this keeps
            # "collection subpackages don't require __init__.py" working everywhere with get_data)
elif b_path.endswith(b'__init__.py') and os.path.isdir(os.path.dirname(b_path)):
return ''
return None
def _synthetic_filename(self, fullname):
return SYNTHETIC_PACKAGE_NAME
def get_filename(self, fullname):
if fullname != self._fullname:
raise ValueError('this loader cannot find files for {0}, only {1}'.format(fullname, self._fullname))
filename = self._source_code_path
if not filename and self.is_package(fullname):
if len(self._subpackage_search_paths) == 1:
filename = os.path.join(self._subpackage_search_paths[0], '__synthetic__')
else:
filename = self._synthetic_filename(fullname)
return filename
def get_code(self, fullname):
if self._compiled_code:
return self._compiled_code
# this may or may not be an actual filename, but it's the value we'll use for __file__
filename = self.get_filename(fullname)
if not filename:
filename = '<string>'
source_code = self.get_source(fullname)
# for things like synthetic modules that really have no source on disk, don't return a code object at all
# vs things like an empty package init (which has an empty string source on disk)
if source_code is None:
return None
self._compiled_code = compile(source=source_code, filename=filename, mode='exec', flags=0, dont_inherit=True)
return self._compiled_code
def iter_modules(self, prefix):
return _iter_modules_impl(self._subpackage_search_paths, prefix)
def __repr__(self):
return '{0}(path={1})'.format(self.__class__.__name__, self._subpackage_search_paths or self._source_code_path)
class _AnsibleCollectionRootPkgLoader(_AnsibleCollectionPkgLoaderBase):
def _validate_args(self):
super(_AnsibleCollectionRootPkgLoader, self)._validate_args()
if len(self._split_name) != 1:
raise ImportError('this loader can only load the ansible_collections toplevel package, not {0}'.format(self._fullname))
# Implements Ansible's custom namespace package support.
# The ansible_collections package and one level down (collections namespaces) are Python namespace packages
# that search across all configured collection roots. The collection package (two levels down) is the first one found
# on the configured collection root path, and Python namespace package aggregation is not allowed at or below
# the collection. Implements implicit package (package dir) support for both Py2/3. Package init code is ignored
# by this loader.
class _AnsibleCollectionNSPkgLoader(_AnsibleCollectionPkgLoaderBase):
def _validate_args(self):
super(_AnsibleCollectionNSPkgLoader, self)._validate_args()
if len(self._split_name) != 2:
raise ImportError('this loader can only load collections namespace packages, not {0}'.format(self._fullname))
def _validate_final(self):
# special-case the `ansible` namespace, since `ansible.builtin` is magical
if not self._subpackage_search_paths and self._package_to_load != 'ansible':
raise ImportError('no {0} found in {1}'.format(self._package_to_load, self._candidate_paths))
# handles locating the actual collection package and associated metadata
class _AnsibleCollectionPkgLoader(_AnsibleCollectionPkgLoaderBase):
def _validate_args(self):
super(_AnsibleCollectionPkgLoader, self)._validate_args()
if len(self._split_name) != 3:
raise ImportError('this loader can only load collection packages, not {0}'.format(self._fullname))
def _validate_final(self):
if self._split_name[1:3] == ['ansible', 'builtin']:
# we don't want to allow this one to have on-disk search capability
self._subpackage_search_paths = []
elif not self._subpackage_search_paths:
raise ImportError('no {0} found in {1}'.format(self._package_to_load, self._candidate_paths))
else:
# only search within the first collection we found
self._subpackage_search_paths = [self._subpackage_search_paths[0]]
def _load_module(self, module):
if not _meta_yml_to_dict:
raise ValueError('ansible.utils.collection_loader._meta_yml_to_dict is not set')
module._collection_meta = {}
# TODO: load collection metadata, cache in __loader__ state
collection_name = '.'.join(self._split_name[1:3])
if collection_name == 'ansible.builtin':
# ansible.builtin is a synthetic collection, get its routing config from the Ansible distro
ansible_pkg_path = os.path.dirname(import_module('ansible').__file__)
metadata_path = os.path.join(ansible_pkg_path, 'config/ansible_builtin_runtime.yml')
with open(to_bytes(metadata_path), 'rb') as fd:
raw_routing = fd.read()
else:
b_routing_meta_path = to_bytes(os.path.join(module.__path__[0], 'meta/runtime.yml'))
if os.path.isfile(b_routing_meta_path):
with open(b_routing_meta_path, 'rb') as fd:
raw_routing = fd.read()
else:
raw_routing = ''
try:
if raw_routing:
routing_dict = _meta_yml_to_dict(raw_routing, (collection_name, 'runtime.yml'))
module._collection_meta = self._canonicalize_meta(routing_dict)
except Exception as ex:
raise ValueError('error parsing collection metadata: {0}'.format(to_native(ex)))
AnsibleCollectionConfig.on_collection_load.fire(collection_name=collection_name, collection_path=os.path.dirname(module.__file__))
return module
def exec_module(self, module):
super(_AnsibleCollectionPkgLoader, self).exec_module(module)
self._load_module(module)
def create_module(self, spec):
return None
def load_module(self, fullname):
module = super(_AnsibleCollectionPkgLoader, self).load_module(fullname)
return self._load_module(module)
def _canonicalize_meta(self, meta_dict):
# TODO: rewrite import keys and all redirect targets that start with .. (current namespace) and . (current collection)
# OR we could do it all on the fly?
# if not meta_dict:
# return {}
#
# ns_name = '.'.join(self._split_name[0:2])
# collection_name = '.'.join(self._split_name[0:3])
#
# #
# for routing_type, routing_type_dict in iteritems(meta_dict.get('plugin_routing', {})):
# for plugin_key, plugin_dict in iteritems(routing_type_dict):
# redirect = plugin_dict.get('redirect', '')
# if redirect.startswith('..'):
# redirect = redirect[2:]
return meta_dict
# loads everything under a collection, including handling redirections defined by the collection
class _AnsibleCollectionLoader(_AnsibleCollectionPkgLoaderBase):
# HACK: stash this in a better place
_redirected_package_map = {} # type: dict[str, str]
_allows_package_code = True
def _validate_args(self):
super(_AnsibleCollectionLoader, self)._validate_args()
if len(self._split_name) < 4:
raise ValueError('this loader is only for sub-collection modules/packages, not {0}'.format(self._fullname))
def _get_candidate_paths(self, path_list):
if len(path_list) != 1 and self._split_name[1:3] != ['ansible', 'builtin']:
raise ValueError('this loader requires exactly one path to search')
return path_list
def _get_subpackage_search_paths(self, candidate_paths):
collection_name = '.'.join(self._split_name[1:3])
collection_meta = _get_collection_metadata(collection_name)
# check for explicit redirection, as well as ancestor package-level redirection (only load the actual code once!)
redirect = None
explicit_redirect = False
routing_entry = _nested_dict_get(collection_meta, ['import_redirection', self._fullname])
if routing_entry:
redirect = routing_entry.get('redirect')
if redirect:
explicit_redirect = True
else:
redirect = _get_ancestor_redirect(self._redirected_package_map, self._fullname)
# NB: package level redirection requires hooking all future imports beneath the redirected source package
# in order to ensure sanity on future relative imports. We always import everything under its "real" name,
# then add a sys.modules entry with the redirected name using the same module instance. If we naively imported
# the source for each redirection, most submodules would import OK, but we'd have N runtime copies of the module
# (one for each name), and relative imports that ascend above the redirected package would break (since they'd
# see the redirected ancestor package contents instead of the package where they actually live).
if redirect:
# FIXME: wrap this so we can be explicit about a failed redirection
self._redirect_module = import_module(redirect)
if explicit_redirect and hasattr(self._redirect_module, '__path__') and self._redirect_module.__path__:
# if the import target looks like a package, store its name so we can rewrite future descendent loads
self._redirected_package_map[self._fullname] = redirect
# if we redirected, don't do any further custom package logic
return None
# we're not doing a redirect- try to find what we need to actually load a module/package
# this will raise ImportError if we can't find the requested module/package at all
if not candidate_paths:
            # no place to look, just raise ImportError
raise ImportError('package has no paths')
found_path, has_code, package_path = self._module_file_from_path(self._package_to_load, candidate_paths[0])
# still here? we found something to load...
if has_code:
self._source_code_path = found_path
if package_path:
return [package_path] # always needs to be a list
return None
# This loader only answers for intercepted Ansible Python modules. Normal imports will fail here and be picked up later
# by our path_hook importer (which proxies the built-in import mechanisms, allowing normal caching etc to occur)
class _AnsibleInternalRedirectLoader:
def __init__(self, fullname, path_list):
self._redirect = None
split_name = fullname.split('.')
toplevel_pkg = split_name[0]
module_to_load = split_name[-1]
if toplevel_pkg != 'ansible':
raise ImportError('not interested')
builtin_meta = _get_collection_metadata('ansible.builtin')
routing_entry = _nested_dict_get(builtin_meta, ['import_redirection', fullname])
if routing_entry:
self._redirect = routing_entry.get('redirect')
if not self._redirect:
raise ImportError('not redirected, go ask path_hook')
def get_resource_reader(self, fullname):
return _AnsibleTraversableResources(fullname, self)
def exec_module(self, module):
# should never see this
if not self._redirect:
raise ValueError('no redirect found for {0}'.format(module.__spec__.name))
# Replace the module with the redirect
sys.modules[module.__spec__.name] = import_module(self._redirect)
def create_module(self, spec):
return None
def load_module(self, fullname):
# since we're delegating to other loaders, this should only be called for internal redirects where we answered
# find_module with this loader, in which case we'll just directly import the redirection target, insert it into
# sys.modules under the name it was requested by, and return the original module.
# should never see this
if not self._redirect:
raise ValueError('no redirect found for {0}'.format(fullname))
# FIXME: smuggle redirection context, provide warning/error that we tried and failed to redirect
mod = import_module(self._redirect)
sys.modules[fullname] = mod
return mod
class AnsibleCollectionRef:
# FUTURE: introspect plugin loaders to get these dynamically?
VALID_REF_TYPES = frozenset(to_text(r) for r in ['action', 'become', 'cache', 'callback', 'cliconf', 'connection',
'doc_fragments', 'filter', 'httpapi', 'inventory', 'lookup',
'module_utils', 'modules', 'netconf', 'role', 'shell', 'strategy',
'terminal', 'test', 'vars', 'playbook'])
# FIXME: tighten this up to match Python identifier reqs, etc
VALID_SUBDIRS_RE = re.compile(to_text(r'^\w+(\.\w+)*$'))
VALID_FQCR_RE = re.compile(to_text(r'^\w+(\.\w+){2,}$')) # can have 0-N included subdirs as well
def __init__(self, collection_name, subdirs, resource, ref_type):
"""
Create an AnsibleCollectionRef from components
:param collection_name: a collection name of the form 'namespace.collectionname'
:param subdirs: optional subdir segments to be appended below the plugin type (eg, 'subdir1.subdir2')
        :param resource: the name of the resource being referenced (eg, 'mymodule', 'someaction', 'a_role')
:param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
"""
collection_name = to_text(collection_name, errors='strict')
if subdirs is not None:
subdirs = to_text(subdirs, errors='strict')
resource = to_text(resource, errors='strict')
ref_type = to_text(ref_type, errors='strict')
if not self.is_valid_collection_name(collection_name):
raise ValueError('invalid collection name (must be of the form namespace.collection): {0}'.format(to_native(collection_name)))
if ref_type not in self.VALID_REF_TYPES:
raise ValueError('invalid collection ref_type: {0}'.format(ref_type))
self.collection = collection_name
if subdirs:
if not re.match(self.VALID_SUBDIRS_RE, subdirs):
raise ValueError('invalid subdirs entry: {0} (must be empty/None or of the form subdir1.subdir2)'.format(to_native(subdirs)))
self.subdirs = subdirs
else:
self.subdirs = u''
self.resource = resource
self.ref_type = ref_type
package_components = [u'ansible_collections', self.collection]
fqcr_components = [self.collection]
self.n_python_collection_package_name = to_native('.'.join(package_components))
if self.ref_type == u'role':
package_components.append(u'roles')
elif self.ref_type == u'playbook':
package_components.append(u'playbooks')
else:
# we assume it's a plugin
package_components += [u'plugins', self.ref_type]
if self.subdirs:
package_components.append(self.subdirs)
fqcr_components.append(self.subdirs)
if self.ref_type in (u'role', u'playbook'):
# playbooks and roles are their own resource
package_components.append(self.resource)
fqcr_components.append(self.resource)
self.n_python_package_name = to_native('.'.join(package_components))
self._fqcr = u'.'.join(fqcr_components)
def __repr__(self):
return 'AnsibleCollectionRef(collection={0!r}, subdirs={1!r}, resource={2!r})'.format(self.collection, self.subdirs, self.resource)
@property
def fqcr(self):
return self._fqcr
@staticmethod
def from_fqcr(ref, ref_type):
"""
Parse a string as a fully-qualified collection reference, raises ValueError if invalid
:param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
:param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
:return: a populated AnsibleCollectionRef object
"""
# assuming the fq_name is of the form (ns).(coll).(optional_subdir_N).(resource_name),
# we split the resource name off the right, split ns and coll off the left, and we're left with any optional
# subdirs that need to be added back below the plugin-specific subdir we'll add. So:
# ns.coll.resource -> ansible_collections.ns.coll.plugins.(plugintype).resource
# ns.coll.subdir1.resource -> ansible_collections.ns.coll.plugins.subdir1.(plugintype).resource
# ns.coll.rolename -> ansible_collections.ns.coll.roles.rolename
if not AnsibleCollectionRef.is_valid_fqcr(ref):
raise ValueError('{0} is not a valid collection reference'.format(to_native(ref)))
ref = to_text(ref, errors='strict')
ref_type = to_text(ref_type, errors='strict')
ext = ''
if ref_type == u'playbook' and ref.endswith(PB_EXTENSIONS):
resource_splitname = ref.rsplit(u'.', 2)
package_remnant = resource_splitname[0]
resource = resource_splitname[1]
ext = '.' + resource_splitname[2]
else:
resource_splitname = ref.rsplit(u'.', 1)
package_remnant = resource_splitname[0]
resource = resource_splitname[1]
# split the left two components of the collection package name off, anything remaining is plugin-type
# specific subdirs to be added back on below the plugin type
package_splitname = package_remnant.split(u'.', 2)
if len(package_splitname) == 3:
subdirs = package_splitname[2]
else:
subdirs = u''
collection_name = u'.'.join(package_splitname[0:2])
return AnsibleCollectionRef(collection_name, subdirs, resource + ext, ref_type)
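    # Editorial sketch (not part of the original source): the mapping described
    # above, in concrete terms:
    #
    #     ref = AnsibleCollectionRef.from_fqcr('ns.coll.subdir1.my_module', 'modules')
    #     ref.collection              # 'ns.coll'
    #     ref.subdirs                 # 'subdir1'
    #     ref.resource                # 'my_module'
    #     ref.n_python_package_name   # 'ansible_collections.ns.coll.plugins.modules.subdir1'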
@staticmethod
def try_parse_fqcr(ref, ref_type):
"""
Attempt to parse a string as a fully-qualified collection reference, returning None on failure (instead of raising an error)
:param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
:param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
:return: a populated AnsibleCollectionRef object on successful parsing, else None
"""
try:
return AnsibleCollectionRef.from_fqcr(ref, ref_type)
except ValueError:
pass
@staticmethod
def legacy_plugin_dir_to_plugin_type(legacy_plugin_dir_name):
"""
Utility method to convert from a PluginLoader dir name to a plugin ref_type
:param legacy_plugin_dir_name: PluginLoader dir name (eg, 'action_plugins', 'library')
:return: the corresponding plugin ref_type (eg, 'action', 'role')
"""
legacy_plugin_dir_name = to_text(legacy_plugin_dir_name)
plugin_type = legacy_plugin_dir_name.removesuffix(u'_plugins')
if plugin_type == u'library':
plugin_type = u'modules'
if plugin_type not in AnsibleCollectionRef.VALID_REF_TYPES:
raise ValueError('{0} cannot be mapped to a valid collection ref type'.format(to_native(legacy_plugin_dir_name)))
return plugin_type
@staticmethod
def is_valid_fqcr(ref, ref_type=None):
"""
        Validates whether a string is a well-formed fully-qualified collection reference (does not look up the collection itself)
:param ref: candidate collection reference to validate (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
:param ref_type: optional reference type to enable deeper validation, eg 'module', 'role', 'doc_fragment'
:return: True if the collection ref passed is well-formed, False otherwise
"""
ref = to_text(ref)
if not ref_type:
return bool(re.match(AnsibleCollectionRef.VALID_FQCR_RE, ref))
return bool(AnsibleCollectionRef.try_parse_fqcr(ref, ref_type))
@staticmethod
def is_valid_collection_name(collection_name):
"""
Validates if the given string is a well-formed collection name (does not look up the collection itself)
:param collection_name: candidate collection name to validate (a valid name is of the form 'ns.collname')
:return: True if the collection name passed is well-formed, False otherwise
"""
collection_name = to_text(collection_name)
if collection_name.count(u'.') != 1:
return False
return all(
# NOTE: keywords and identifiers are different in different Pythons
not iskeyword(ns_or_name) and is_python_identifier(ns_or_name)
for ns_or_name in collection_name.split(u'.')
)
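# Editorial sketch (not part of the original source): validation in practice:
#
#     AnsibleCollectionRef.is_valid_collection_name('ns.coll')        # True
#     AnsibleCollectionRef.is_valid_collection_name('ns.coll.extra')  # False (two dots)
#     AnsibleCollectionRef.is_valid_collection_name('import.coll')    # False ('import' is a keyword)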
def _get_collection_path(collection_name):
collection_name = to_native(collection_name)
if not collection_name or not isinstance(collection_name, str) or len(collection_name.split('.')) != 2:
raise ValueError('collection_name must be a non-empty string of the form namespace.collection')
try:
collection_pkg = import_module('ansible_collections.' + collection_name)
except ImportError:
raise ValueError('unable to locate collection {0}'.format(collection_name))
return to_native(os.path.dirname(to_bytes(collection_pkg.__file__)))
def _get_collection_playbook_path(playbook):
acr = AnsibleCollectionRef.try_parse_fqcr(playbook, u'playbook')
if acr:
try:
# get_collection_path
pkg = import_module(acr.n_python_collection_package_name)
except (IOError, ModuleNotFoundError) as e:
# leaving e as debug target, even though not used in normal code
pkg = None
if pkg:
cpath = os.path.join(sys.modules[acr.n_python_collection_package_name].__file__.replace('__synthetic__', 'playbooks'))
if acr.subdirs:
paths = [to_native(x) for x in acr.subdirs.split(u'.')]
paths.insert(0, cpath)
cpath = os.path.join(*paths)
path = os.path.join(cpath, to_native(acr.resource))
if os.path.exists(to_bytes(path)):
return acr.resource, path, acr.collection
elif not acr.resource.endswith(PB_EXTENSIONS):
for ext in PB_EXTENSIONS:
path = os.path.join(cpath, to_native(acr.resource + ext))
if os.path.exists(to_bytes(path)):
return acr.resource, path, acr.collection
return None
def _get_collection_role_path(role_name, collection_list=None):
return _get_collection_resource_path(role_name, u'role', collection_list)
def _get_collection_resource_path(name, ref_type, collection_list=None):
if ref_type == u'playbook':
        # playbooks are handled a bit differently due to 'extension variance' and the lack of a collection_list
return _get_collection_playbook_path(name)
acr = AnsibleCollectionRef.try_parse_fqcr(name, ref_type)
if acr:
# looks like a valid qualified collection ref; skip the collection_list
collection_list = [acr.collection]
subdirs = acr.subdirs
resource = acr.resource
elif not collection_list:
return None # not a FQ and no collection search list spec'd, nothing to do
else:
resource = name # treat as unqualified, loop through the collection search list to try and resolve
subdirs = ''
for collection_name in collection_list:
try:
acr = AnsibleCollectionRef(collection_name=collection_name, subdirs=subdirs, resource=resource, ref_type=ref_type)
# FIXME: error handling/logging; need to catch any import failures and move along
pkg = import_module(acr.n_python_package_name)
if pkg is not None:
# the package is now loaded, get the collection's package and ask where it lives
path = os.path.dirname(to_bytes(sys.modules[acr.n_python_package_name].__file__, errors='surrogate_or_strict'))
return resource, to_text(path, errors='surrogate_or_strict'), collection_name
except (IOError, ModuleNotFoundError) as e:
continue
except Exception as ex:
# FIXME: pick out typical import errors first, then error logging
continue
return None
def _get_collection_name_from_path(path):
"""
Return the containing collection name for a given path, or None if the path is not below a configured collection, or
the collection cannot be loaded (eg, the collection is masked by another of the same name higher in the configured
collection roots).
:param path: path to evaluate for collection containment
:return: collection name or None
"""
# ensure we compare full paths since pkg path will be abspath
path = to_native(os.path.abspath(to_bytes(path)))
path_parts = path.split('/')
if path_parts.count('ansible_collections') != 1:
return None
ac_pos = path_parts.index('ansible_collections')
# make sure it's followed by at least a namespace and collection name
if len(path_parts) < ac_pos + 3:
return None
candidate_collection_name = '.'.join(path_parts[ac_pos + 1:ac_pos + 3])
try:
# we've got a name for it, now see if the path prefix matches what the loader sees
imported_pkg_path = to_native(os.path.dirname(to_bytes(import_module('ansible_collections.' + candidate_collection_name).__file__)))
except ImportError:
return None
# reassemble the original path prefix up the collection name, and it should match what we just imported. If not
# this is probably a collection root that's not configured.
original_path_prefix = os.path.join('/', *path_parts[0:ac_pos + 3])
imported_pkg_path = to_native(os.path.abspath(to_bytes(imported_pkg_path)))
if original_path_prefix != imported_pkg_path:
return None
return candidate_collection_name
def _get_import_redirect(collection_meta_dict, fullname):
if not collection_meta_dict:
return None
return _nested_dict_get(collection_meta_dict, ['import_redirection', fullname, 'redirect'])
def _get_ancestor_redirect(redirected_package_map, fullname):
# walk the requested module's ancestor packages to see if any have been previously redirected
cur_pkg = fullname
while cur_pkg:
cur_pkg = cur_pkg.rpartition('.')[0]
ancestor_redirect = redirected_package_map.get(cur_pkg)
if ancestor_redirect:
# rewrite the prefix on fullname so we import the target first, then alias it
redirect = ancestor_redirect + fullname[len(cur_pkg):]
return redirect
return None
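# Editorial sketch (not part of the original source): given a prior package
# redirect {'a.b': 'x.y'}, a request for 'a.b.c.d' is rewritten by splicing the
# redirect target onto the remainder of the dotted name:
#
#     _get_ancestor_redirect({'a.b': 'x.y'}, 'a.b.c.d')   # -> 'x.y.c.d'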
def _nested_dict_get(root_dict, key_list):
cur_value = root_dict
for key in key_list:
cur_value = cur_value.get(key)
if not cur_value:
return None
return cur_value
def _iter_modules_impl(paths, prefix=''):
# NB: this currently only iterates what's on disk- redirected modules are not considered
if not prefix:
prefix = ''
else:
prefix = to_native(prefix)
# yield (module_loader, name, ispkg) for each module/pkg under path
# TODO: implement ignore/silent catch for unreadable?
for b_path in map(to_bytes, paths):
if not os.path.isdir(b_path):
continue
for b_basename in sorted(os.listdir(b_path)):
b_candidate_module_path = os.path.join(b_path, b_basename)
if os.path.isdir(b_candidate_module_path):
# exclude things that obviously aren't Python package dirs
# FIXME: this dir is adjustable in py3.8+, check for it
if b'.' in b_basename or b_basename == b'__pycache__':
continue
# TODO: proper string handling?
yield prefix + to_native(b_basename), True
else:
# FIXME: match builtin ordering for package/dir/file, support compiled?
if b_basename.endswith(b'.py') and b_basename != b'__init__.py':
yield prefix + to_native(os.path.splitext(b_basename)[0]), False
def _get_collection_metadata(collection_name):
collection_name = to_native(collection_name)
if not collection_name or not isinstance(collection_name, str) or len(collection_name.split('.')) != 2:
raise ValueError('collection_name must be a non-empty string of the form namespace.collection')
try:
collection_pkg = import_module('ansible_collections.' + collection_name)
except ImportError:
raise ValueError('unable to locate collection {0}'.format(collection_name))
_collection_meta = getattr(collection_pkg, '_collection_meta', None)
if _collection_meta is None:
raise ValueError('collection metadata was not loaded for collection {0}'.format(collection_name))
return _collection_meta
| 55,430 | Python | .py | 1,016 | 45.094488 | 148 | 0.659839 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,130 | __init__.py | ansible_ansible/lib/ansible/utils/collection_loader/__init__.py |
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# CAUTION: This implementation of the collection loader is used by ansible-test.
# Because of this, it must be compatible with all Python versions supported on the controller or remote.
from __future__ import annotations
# FIXME: decide what of this we want to actually be public/toplevel, put other stuff on a utility class?
from ._collection_config import AnsibleCollectionConfig
from ._collection_finder import AnsibleCollectionRef
from ansible.module_utils.common.text.converters import to_text
def resource_from_fqcr(ref):
"""
Return resource from a fully-qualified collection reference,
or from a simple resource name.
For fully-qualified collection references, this is equivalent to
``AnsibleCollectionRef.from_fqcr(ref).resource``.
:param ref: collection reference to parse
:return: the resource as a unicode string
"""
ref = to_text(ref, errors='strict')
return ref.split(u'.')[-1]
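# Editorial sketch (not part of the original file): the helper simply takes the
# final dotted segment, so it accepts both FQCRs and bare resource names.
if __name__ == '__main__':
    assert resource_from_fqcr('ns.coll.my_module') == 'my_module'
    assert resource_from_fqcr('my_module') == 'my_module'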
| 1,065 | Python | .py | 20 | 50 | 113 | 0.7625 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,131 | list.py | ansible_ansible/lib/ansible/collections/list.py |
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.cli.galaxy import with_collection_artifacts_manager
from ansible.galaxy.collection import find_existing_collections
from ansible.module_utils.common.text.converters import to_bytes
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.display import Display
display = Display()
@with_collection_artifacts_manager
def list_collections(coll_filter=None, search_paths=None, dedupe=True, artifacts_manager=None):
collections = {}
for candidate in list_collection_dirs(search_paths=search_paths, coll_filter=coll_filter, artifacts_manager=artifacts_manager, dedupe=dedupe):
collection = _get_collection_name_from_path(candidate)
collections[collection] = candidate
return collections
@with_collection_artifacts_manager
def list_collection_dirs(search_paths=None, coll_filter=None, artifacts_manager=None, dedupe=True):
"""
Return paths for the specific collections found in passed or configured search paths
:param search_paths: list of text-string paths, if none load default config
:param coll_filter: limit collections to just the specific namespace or collection, if None all are returned
:return: list of collection directory paths
"""
namespace_filter = None
collection_filter = None
has_pure_namespace_filter = False # whether at least one coll_filter is a namespace-only filter
if coll_filter is not None:
if isinstance(coll_filter, str):
coll_filter = [coll_filter]
namespace_filter = set()
for coll_name in coll_filter:
if '.' in coll_name:
try:
namespace, collection = coll_name.split('.')
except ValueError:
raise AnsibleError("Invalid collection pattern supplied: %s" % coll_name)
namespace_filter.add(namespace)
if not has_pure_namespace_filter:
if collection_filter is None:
collection_filter = []
collection_filter.append(collection)
else:
namespace_filter.add(coll_name)
has_pure_namespace_filter = True
collection_filter = None
namespace_filter = sorted(namespace_filter)
for req in find_existing_collections(search_paths, artifacts_manager, namespace_filter=namespace_filter,
collection_filter=collection_filter, dedupe=dedupe):
if not has_pure_namespace_filter and coll_filter is not None and req.fqcn not in coll_filter:
continue
yield to_bytes(req.src)
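# Editorial sketch (not part of the original source): typical calls, assuming a
# configured collection search path:
#
#     list_collections(coll_filter='ns')        # every collection under namespace 'ns'
#     list_collections(coll_filter='ns.coll')   # just that collection, if installed
#     list(list_collection_dirs())              # bytes paths for all collections found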
| 2,873 | Python | .py | 53 | 45 | 146 | 0.694662 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,132 | template.py | ansible_ansible/lib/ansible/modules/template.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import annotations
DOCUMENTATION = r"""
---
module: template
version_added: historical
short_description: Template a file out to a target host
options:
follow:
description:
- Determine whether symbolic links should be followed.
- When set to V(true) symbolic links will be followed, if they exist.
- When set to V(false) symbolic links will not be followed.
- Previous to Ansible 2.4, this was hardcoded as V(true).
type: bool
default: no
version_added: '2.4'
notes:
- For Windows you can use M(ansible.windows.win_template) which uses V(\\r\\n) as O(newline_sequence) by default.
- The C(jinja2_native) setting has no effect. Native types are never used in the M(ansible.builtin.template) module
which is by design used for generating text files. For working with templates and utilizing Jinja2 native types see
the O(ansible.builtin.template#lookup:jinja2_native) parameter of the P(ansible.builtin.template#lookup) lookup.
seealso:
- module: ansible.builtin.copy
- module: ansible.windows.win_copy
- module: ansible.windows.win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
- action_common_attributes.files
- backup
- files
- template_common
- validate
attributes:
action:
support: full
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix
safe_file_operations:
support: full
vault:
support: full
"""
EXAMPLES = r"""
- name: Template a file to /etc/file.conf
ansible.builtin.template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
ansible.builtin.template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
ansible.builtin.template:
src: named.conf_{{ ansible_os_family }}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
mode: '0640'
- name: Create a DOS-style text file from a template
ansible.builtin.template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
ansible.builtin.template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
ansible.builtin.template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
"""
RETURN = r"""
dest:
description: Destination file/path, equal to the value passed to I(dest).
returned: success
type: str
sample: /path/to/file.txt
checksum:
description: SHA1 checksum of the rendered file
returned: always
type: str
sample: 373296322247ab85d26d5d1257772757e7afd172
uid:
description: Numeric id representing the file owner
returned: success
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success
type: int
sample: 1003
owner:
description: User name of owner
returned: success
type: str
sample: httpd
group:
description: Group name of owner
returned: success
type: str
sample: www-data
md5sum:
description: MD5 checksum of the rendered file
returned: changed
type: str
sample: d41d8cd98f00b204e9800998ecf8427e
mode:
description: Unix permissions of the file in octal representation as a string
returned: success
type: str
sample: 1755
size:
description: Size of the rendered file in bytes
returned: success
type: int
sample: 42
src:
description: Source file used for the copy on the target machine.
returned: changed
type: str
sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
"""
| 4,537 | Python | .py | 152 | 26.125 | 129 | 0.728562 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,133 | setup.py | ansible_ansible/lib/ansible/modules/setup.py |
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: setup
version_added: historical
short_description: Gathers facts about remote hosts
options:
gather_subset:
version_added: "2.1"
description:
- "If supplied, restrict the additional facts collected to the given subset.
Possible values: V(all), V(all_ipv4_addresses), V(all_ipv6_addresses), V(apparmor), V(architecture),
V(caps), V(chroot), V(cmdline), V(date_time), V(default_ipv4), V(default_ipv6), V(devices),
V(distribution), V(distribution_major_version), V(distribution_release), V(distribution_version),
V(dns), V(effective_group_ids), V(effective_user_id), V(env), V(facter), V(fips), V(hardware),
V(interfaces), V(is_chroot), V(iscsi), V(kernel), V(local), V(lsb), V(machine), V(machine_id),
V(mounts), V(network), V(ohai), V(os_family), V(pkg_mgr), V(platform), V(processor), V(processor_cores),
V(processor_count), V(python), V(python_version), V(real_user_id), V(selinux), V(service_mgr),
V(ssh_host_key_dsa_public), V(ssh_host_key_ecdsa_public), V(ssh_host_key_ed25519_public),
V(ssh_host_key_rsa_public), V(ssh_host_pub_keys), V(ssh_pub_keys), V(system), V(system_capabilities),
V(system_capabilities_enforced), V(systemd), V(user), V(user_dir), V(user_gecos), V(user_gid), V(user_id),
V(user_shell), V(user_uid), V(virtual), V(virtualization_role), V(virtualization_type).
A list of values can be supplied to select a larger subset.
Values can also be prefixed with C(!) to specify that
a specific subset should not be collected. For instance:
V(!hardware,!network,!virtual,!ohai,!facter). If V(!all) is specified
then only the min subset is collected. To avoid collecting even the
min subset, specify V(!all,!min). To collect only specific facts,
use V(!all,!min), and specify the particular fact subsets.
Use the filter parameter if you do not want to display some collected
facts."
type: list
elements: str
default: "all"
gather_timeout:
version_added: "2.2"
description:
- Set the default timeout in seconds for individual fact gathering.
type: int
default: 10
filter:
version_added: "1.1"
description:
- If supplied, only return facts that match one of the shell-style
(fnmatch) patterns. An empty list means 'no filter'.
As of Ansible 2.11, the type has changed from string to list
and the default has become an empty list. A simple string is
still accepted and works as a single pattern, so the behaviour
prior to Ansible 2.11 is preserved.
type: list
elements: str
default: []
fact_path:
version_added: "1.3"
description:
- Path used for local ansible facts (C(*.fact)) - files in this dir
will be run (if executable) and their results be added to C(ansible_local) facts.
If a file is not executable it is read instead.
File/results format can be JSON or INI-format. The default O(fact_path) can be
specified in C(ansible.cfg) for when setup is automatically called as part of
C(gather_facts).
NOTE - For Windows clients, the results will be added to a variable named after the
local file (without extension suffix), rather than C(ansible_local).
- Since Ansible 2.1, Windows hosts can use O(fact_path). Make sure that this path
exists on the target host. Files in this path MUST be PowerShell scripts C(.ps1)
which outputs an object. This object will be formatted by Ansible as json so the
script should be outputting a raw hashtable, array, or other primitive object.
type: path
default: /etc/ansible/facts.d
description:
- This module is automatically called by playbooks to gather useful
variables about remote hosts that can be used in playbooks. It can also be
executed directly by C(/usr/bin/ansible) to check what variables are
available to a host. Ansible provides many I(facts) about the system,
automatically.
- This module is also supported for Windows targets.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
attributes:
check_mode:
support: full
diff_mode:
support: none
facts:
support: full
platform:
platforms: posix, windows
notes:
- More ansible facts will be added with successive releases. If I(facter) or
I(ohai) are installed, variables from these programs will also be snapshotted
into the JSON file for usage in templating. These variables are prefixed
with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
bubbled up to the caller. Using the ansible facts and choosing to not
install I(facter) and I(ohai) means you can avoid Ruby dependencies on your
remote systems. (See also M(community.general.facter) and M(community.general.ohai).)
- The filter option filters only the first level subkey below ansible_facts.
- If the target host is Windows, you will not currently have the ability to use
O(filter) as this is provided by a simpler implementation of the module.
- This module should be run with elevated privileges on BSD systems to gather facts like ansible_product_version.
- For more information about delegated facts,
please check U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_delegation.html#delegating-facts).
author:
- "Ansible Core Team"
- "Michael DeHaan"
"""
EXAMPLES = r"""
# Display facts from all hosts and store them indexed by `hostname` at `/tmp/facts`.
# ansible all -m ansible.builtin.setup --tree /tmp/facts
# Display only facts regarding memory found by ansible on all hosts and output them.
# ansible all -m ansible.builtin.setup -a 'filter=ansible_*_mb'
# Display only facts returned by facter.
# ansible all -m ansible.builtin.setup -a 'filter=facter_*'
# Collect only facts returned by facter.
# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,facter'
- name: Collect only facts returned by facter
ansible.builtin.setup:
gather_subset:
- '!all'
- '!<any valid subset>'
- facter
- name: Filter and return only selected facts
ansible.builtin.setup:
filter:
- 'ansible_distribution'
- 'ansible_machine_id'
- 'ansible_*_mb'
# Display only facts about certain interfaces.
# ansible all -m ansible.builtin.setup -a 'filter=ansible_eth[0-2]'
# Restrict additional gathered facts to network and virtual (includes default minimum facts)
# ansible all -m ansible.builtin.setup -a 'gather_subset=network,virtual'
# Collect only network and virtual (excludes default minimum facts)
# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,network,virtual'
# Do not call puppet facter or ohai even if present.
# ansible all -m ansible.builtin.setup -a 'gather_subset=!facter,!ohai'
# Only collect the default minimum amount of facts:
# ansible all -m ansible.builtin.setup -a 'gather_subset=!all'
# Collect no facts, even the default minimum subset of facts:
# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,!min'
# Display facts from Windows hosts with custom facts stored in C:\custom_facts.
# ansible windows -m ansible.builtin.setup -a "fact_path='c:\custom_facts'"
# Gathers facts for the machines in the dbservers group (a.k.a Delegating facts)
- hosts: app_servers
tasks:
- name: Gather facts from db servers
ansible.builtin.setup:
delegate_to: "{{ item }}"
delegate_facts: true
loop: "{{ groups['dbservers'] }}"
"""
# import module snippets
from ..module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.facts import ansible_collector, default_collectors
from ansible.module_utils.facts.collector import CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep
from ansible.module_utils.facts.namespace import PrefixFactNamespace
def main():
module = AnsibleModule(
argument_spec=dict(
gather_subset=dict(default=["all"], required=False, type='list', elements='str'),
gather_timeout=dict(default=10, required=False, type='int'),
filter=dict(default=[], required=False, type='list', elements='str'),
fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
),
supports_check_mode=True,
)
gather_subset = module.params['gather_subset']
gather_timeout = module.params['gather_timeout']
filter_spec = module.params['filter']
# TODO: this mimics existing behavior where gather_subset=["!all"] actually means
# to collect nothing except for the below list
# TODO: decide what '!all' means, I lean towards making it mean none, but likely needs
# some tweaking on how gather_subset operations are performed
minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
'distribution', 'dns', 'env', 'fips', 'local',
'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
'service_mgr', 'ssh_pub_keys', 'user'])
all_collector_classes = default_collectors.collectors
# rename namespace_name to root_key?
namespace = PrefixFactNamespace(namespace_name='ansible',
prefix='ansible_')
try:
fact_collector = ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
namespace=namespace,
filter_spec=filter_spec,
gather_subset=gather_subset,
gather_timeout=gather_timeout,
minimal_gather_subset=minimal_gather_subset)
except (TypeError, CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep) as e:
# bad subset given, collector not found, or fact deps declared but unresolved
module.fail_json(msg=to_text(e))
facts_dict = fact_collector.collect(module=module)
module.exit_json(ansible_facts=facts_dict)
if __name__ == '__main__':
main()
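Aside: the O(filter) option documented above matches shell-style (fnmatch) patterns against top-level fact names. A minimal illustration with the standard library (the fact values are invented):

import fnmatch

facts = {'ansible_memtotal_mb': 3904, 'ansible_swaptotal_mb': 0, 'ansible_distribution': 'Fedora'}
patterns = ['ansible_*_mb']

filtered = {key: value for key, value in facts.items()
            if any(fnmatch.fnmatch(key, pattern) for pattern in patterns)}
print(filtered)  # {'ansible_memtotal_mb': 3904, 'ansible_swaptotal_mb': 0}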
| 11,034 | Python | .py | 198 | 46.520202 | 120 | 0.661731 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,134 | package_facts.py | ansible_ansible/lib/ansible/modules/package_facts.py |
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import annotations
DOCUMENTATION = """
module: package_facts
short_description: Package information as facts
description:
- Return information about installed packages as facts.
options:
manager:
description:
- The package manager(s) used by the system so we can query the package information.
This is a list and can support multiple package managers per system, since version 2.8.
- The V(portage) and V(pkg) options were added in version 2.8.
- The V(apk) option was added in version 2.11.
- The V(pkg_info) option was added in version 2.13.
- Aliases were added in 2.18, to support using C(manager={{ansible_facts['pkg_mgr']}})
default: ['auto']
choices:
auto: Depending on O(strategy), will match the first or all package managers provided, in order
rpm: For RPM based distros, requires RPM Python bindings, not installed by default on Suse (python3-rpm)
yum: Alias to rpm
dnf: Alias to rpm
dnf5: Alias to rpm
zypper: Alias to rpm
apt: For DEB based distros, C(python-apt) package must be installed on targeted hosts
portage: Handles ebuild packages; it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'
pkg: libpkg front end (FreeBSD)
pkg5: Alias to pkg
pkgng: Alias to pkg
pacman: Archlinux package manager/builder
apk: Alpine Linux package manager
pkg_info: OpenBSD package manager
openbsd_pkg: Alias to pkg_info
type: list
elements: str
strategy:
description:
- This option controls how the module queries the package managers on the system.
choices:
first: returns only information for the first supported package manager available.
all: returns information for all supported and available package managers on the system.
default: 'first'
type: str
version_added: "2.8"
version_added: "2.5"
requirements:
- See details per package manager in the O(manager) option.
author:
- Matthew Jones (@matburt)
- Brian Coca (@bcoca)
- Adam Miller (@maxamillion)
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
attributes:
check_mode:
support: full
diff_mode:
support: none
facts:
support: full
platform:
platforms: posix
"""
EXAMPLES = """
- name: Gather the package facts
ansible.builtin.package_facts:
manager: auto
- name: Print the package facts
ansible.builtin.debug:
var: ansible_facts.packages
- name: Check whether a package called foobar is installed
ansible.builtin.debug:
msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
when: "'foobar' in ansible_facts.packages"
"""
RETURN = """
ansible_facts:
description: Facts to add to ansible_facts.
returned: always
type: complex
contains:
packages:
description:
- Maps the package name to a non-empty list of dicts with package information.
- Every dict in the list corresponds to one installed version of the package.
- The fields described below are present for all package managers. Depending on the
package manager, there might be more fields for a package.
returned: when an operating system level package manager is specified or auto-detected
type: dict
contains:
name:
description: The package's name.
returned: always
type: str
version:
description: The package's version.
returned: always
type: str
source:
description: Where information on the package came from.
returned: always
type: str
sample: |-
{
"packages": {
"kernel": [
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
...
],
"kernel-tools": [
{
"name": "kernel-tools",
"source": "rpm",
"version": "3.10.0",
...
}
],
...
}
}
# Sample rpm
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
# Sample deb
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
# Sample pkg_info
{
"packages": {
"curl": [
{
"name": "curl",
"source": "pkg_info",
"version": "7.79.0"
}
],
"intel-firmware": [
{
"name": "intel-firmware",
"source": "pkg_info",
"version": "20210608v0"
}
],
}
}
"""
import re
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.facts.packages import CLIMgr, RespawningLibMgr, get_all_pkg_managers
ALIASES = {
'rpm': ['dnf', 'dnf5', 'yum', 'zypper'],
'pkg': ['pkg5', 'pkgng'],
'pkg_info': ['openbsd_pkg'],
}
class RPM(RespawningLibMgr):
LIB = 'rpm'
CLI_BINARIES = ['rpm']
INTERPRETERS = [
'/usr/libexec/platform-python',
'/usr/bin/python3',
]
def list_installed(self):
return self._lib.TransactionSet().dbMatch()
def get_package_details(self, package):
return dict(name=package[self._lib.RPMTAG_NAME],
version=package[self._lib.RPMTAG_VERSION],
release=package[self._lib.RPMTAG_RELEASE],
epoch=package[self._lib.RPMTAG_EPOCH],
arch=package[self._lib.RPMTAG_ARCH],)
class APT(RespawningLibMgr):
LIB = 'apt'
CLI_BINARIES = ['apt', 'apt-get', 'aptitude']
def __init__(self):
self._cache = None
super(APT, self).__init__()
@property
def pkg_cache(self):
if self._cache is not None:
return self._cache
self._cache = self._lib.Cache()
return self._cache
def list_installed(self):
# Store the cache to avoid running pkg_cache() for each item in the comprehension, which is very slow
cache = self.pkg_cache
return [pk for pk in cache.keys() if cache[pk].is_installed]
def get_package_details(self, package):
ac_pkg = self.pkg_cache[package].installed
return dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, category=ac_pkg.section, origin=ac_pkg.origins[0].origin)
class PACMAN(CLIMgr):
CLI = 'pacman'
def list_installed(self):
locale = get_best_parsable_locale(module)
rc, out, err = module.run_command([self._cli, '-Qi'], environ_update=dict(LC_ALL=locale))
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.split("\n\n")[:-1]
def get_package_details(self, package):
# parse values of details that might extend over several lines
raw_pkg_details = {}
last_detail = None
for line in package.splitlines():
m = re.match(r"([\w ]*[\w]) +: (.*)", line)
if m:
last_detail = m.group(1)
raw_pkg_details[last_detail] = m.group(2)
else:
# append value to previous detail
raw_pkg_details[last_detail] = raw_pkg_details[last_detail] + " " + line.lstrip()
provides = None
if raw_pkg_details['Provides'] != 'None':
provides = [
p.split('=')[0]
for p in raw_pkg_details['Provides'].split(' ')
]
return {
'name': raw_pkg_details['Name'],
'version': raw_pkg_details['Version'],
'arch': raw_pkg_details['Architecture'],
'provides': provides,
}
class PKG(CLIMgr):
CLI = 'pkg'
atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']
def list_installed(self):
rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
pkg = dict(zip(self.atoms, package.split('\t')))
if 'arch' in pkg:
try:
pkg['arch'] = pkg['arch'].split(':')[2]
except IndexError:
pass
if 'automatic' in pkg:
pkg['automatic'] = bool(int(pkg['automatic']))
if 'category' in pkg:
pkg['category'] = pkg['category'].split('/', 1)[0]
if 'version' in pkg:
if ',' in pkg['version']:
pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
else:
pkg['port_epoch'] = 0
if '_' in pkg['version']:
pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
else:
pkg['revision'] = '0'
if 'vital' in pkg:
pkg['vital'] = bool(int(pkg['vital']))
return pkg
class PORTAGE(CLIMgr):
CLI = 'qlist'
atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
def list_installed(self):
rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
if rc != 0:
raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
return out.splitlines()
def get_package_details(self, package):
return dict(zip(self.atoms, package.split()))
class APK(CLIMgr):
CLI = 'apk'
def list_installed(self):
rc, out, err = module.run_command([self._cli, 'info', '-v'])
if rc != 0:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
raw_pkg_details = {'name': package, 'version': '', 'release': ''}
nvr = package.rsplit('-', 2)
try:
return {
'name': nvr[0],
'version': nvr[1],
'release': nvr[2],
}
except IndexError:
return raw_pkg_details
class PKG_INFO(CLIMgr):
CLI = 'pkg_info'
def list_installed(self):
rc, out, err = module.run_command([self._cli, '-a'])
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
raw_pkg_details = {'name': package, 'version': ''}
details = package.split(maxsplit=1)[0].rsplit('-', maxsplit=1)
try:
return {
'name': details[0],
'version': details[1],
}
except IndexError:
return raw_pkg_details
def main():
# get supported pkg managers
PKG_MANAGERS = get_all_pkg_managers()
PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
# add aliases
PKG_MANAGER_NAMES.extend([alias for alist in ALIASES.values() for alias in alist])
# start work
global module
# choices are not set for 'manager' as they are computed dynamically and validated below instead of in argspec
module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'elements': 'str', 'default': ['auto']},
strategy={'choices': ['first', 'all'], 'default': 'first'}),
supports_check_mode=True)
packages = {}
results = {'ansible_facts': {}}
managers = [x.lower() for x in module.params['manager']]
strategy = module.params['strategy']
if 'auto' in managers:
# keep order from user, we do dedupe below
managers.extend(PKG_MANAGER_NAMES)
managers.remove('auto')
unsupported = set(managers).difference(PKG_MANAGER_NAMES)
if unsupported:
if 'auto' in module.params['manager']:
msg = 'Could not auto detect a usable package manager, check warnings for details.'
else:
msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
module.fail_json(msg=msg)
found = 0
seen = set()
for pkgmgr in managers:
if strategy == 'first' and found:
break
# substitute aliases for aliased
for aliased in ALIASES.keys():
if pkgmgr in ALIASES[aliased]:
pkgmgr = aliased
break
# dedupe as per above
if pkgmgr in seen:
continue
seen.add(pkgmgr)
manager = PKG_MANAGERS[pkgmgr]()
try:
if manager.is_available(handle_exceptions=False):
found += 1
try:
packages.update(manager.get_packages())
except Exception as e:
module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
if found == 0:
msg = ('Could not detect a supported package manager from the following list: %s, '
'or the required Python library is not installed. Check warnings for details.' % managers)
module.fail_json(msg=msg)
# Set the facts, this will override the facts in ansible_facts that might exist from previous runs
# when using operating system level or distribution package managers
results['ansible_facts']['packages'] = packages
module.exit_json(**results)
if __name__ == '__main__':
main()
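Aside: as documented in RETURN above, C(ansible_facts.packages) maps each package name to a non-empty list of per-version dicts. A small consumer-side sketch (the sample data is invented):

packages = {
    'kernel': [
        {'name': 'kernel', 'source': 'rpm', 'version': '3.10.0', 'release': '514.26.2.el7'},
        {'name': 'kernel', 'source': 'rpm', 'version': '3.10.0', 'release': '693.2.2.el7'},
    ],
}
for name, versions in packages.items():
    print('%s: %d installed version(s), first from %s' % (name, len(versions), versions[0]['source']))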
| 16,806 | Python | .py | 456 | 26.425439 | 141 | 0.533682 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,135 | slurp.py | ansible_ansible/lib/ansible/modules/slurp.py |
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: slurp
version_added: historical
short_description: Slurps a file from remote nodes
description:
- This module works like M(ansible.builtin.fetch). It is used for fetching a base64-
encoded blob containing the data in a remote file.
- This module is also supported for Windows targets.
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a directory.
type: path
required: true
aliases: [ path ]
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix, windows
notes:
- This module returns an 'in memory' base64 encoded version of the file; take
into account that this will require at least twice as much RAM as the original file size.
seealso:
- module: ansible.builtin.fetch
author:
- Ansible Core Team
- Michael DeHaan (@mpdehaan)
"""
EXAMPLES = r"""
- name: Find out what the remote machine's mounts are
ansible.builtin.slurp:
src: /proc/mounts
register: mounts
- name: Print returned information
ansible.builtin.debug:
msg: "{{ mounts['content'] | b64decode }}"
# From the commandline, find the pid of the remote machine's sshd
# $ ansible host -m ansible.builtin.slurp -a 'src=/var/run/sshd.pid'
# host | SUCCESS => {
# "changed": false,
# "content": "MjE3OQo=",
# "encoding": "base64",
# "source": "/var/run/sshd.pid"
# }
# $ echo MjE3OQo= | base64 -d
# 2179
"""
RETURN = r"""
content:
description: Encoded file content
returned: success
type: str
sample: "MjE3OQo="
encoding:
description: Type of encoding used for file
returned: success
type: str
sample: "base64"
source:
description: Actual path of file slurped
returned: success
type: str
sample: "/var/run/sshd.pid"
"""
import base64
import errno
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path', required=True, aliases=['path']),
),
supports_check_mode=True,
)
source = module.params['src']
try:
with open(source, 'rb') as source_fh:
source_content = source_fh.read()
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
msg = "file not found: %s" % source
elif e.errno == errno.EACCES:
msg = "file is not readable: %s" % source
elif e.errno == errno.EISDIR:
msg = "source is a directory and must be a file: %s" % source
else:
msg = "unable to slurp file: %s" % to_native(e, errors='surrogate_then_replace')
module.fail_json(msg)
data = base64.b64encode(source_content)
module.exit_json(content=data, source=source, encoding='base64')
if __name__ == '__main__':
main()
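Aside: decoding a slurp result on the controller mirrors the base64 round trip shown in the EXAMPLES above:

import base64

result = {'content': 'MjE3OQo=', 'encoding': 'base64', 'source': '/var/run/sshd.pid'}
assert result['encoding'] == 'base64'
print(base64.b64decode(result['content']).decode())  # prints: 2179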
| 3,208 | Python | .py | 103 | 26.893204 | 92 | 0.675737 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,136 | expect.py | ansible_ansible/lib/ansible/modules/expect.py |
# -*- coding: utf-8 -*-
# (c) 2015, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: expect
version_added: '2.0'
short_description: Executes a command and responds to prompts
description:
- The M(ansible.builtin.expect) module executes a command and responds to prompts.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work.
options:
command:
description:
- The command to run.
required: true
type: str
creates:
type: path
description:
- A filename; when it already exists, this step will B(not) be run.
removes:
type: path
description:
- A filename; when it does not exist, this step will B(not) be run.
chdir:
type: path
description:
- Change into this directory before running the command.
responses:
type: dict
description:
- Mapping of prompt regular expressions and corresponding answer(s).
- Each key in O(responses) is a Python regex U(https://docs.python.org/3/library/re.html#regular-expression-syntax).
- The value of each key is a string or list of strings.
If the value is a string and the prompt is encountered multiple times, the answer will be repeated.
Provide the value as a list to give different answers for successive matches.
required: true
timeout:
type: raw
description:
- Amount of time in seconds to wait for the expected strings. Use
V(null) to disable timeout.
default: 30
echo:
description:
- Whether or not to echo out your response strings.
default: false
type: bool
requirements:
- python >= 2.6
- pexpect >= 3.3
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
platform:
support: full
platforms: posix
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), and so on), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else").
- Case insensitive searches are indicated with a prefix of C((?i)).
- The C(pexpect) library used by this module operates with a search window
of 2000 bytes, and does not use a multiline regex match. To perform a
start of line bound match, use a pattern like C((?m)^pattern).
- The M(ansible.builtin.expect) module is designed for simple scenarios.
For more complex needs, consider the use of expect code with the M(ansible.builtin.shell)
or M(ansible.builtin.script) modules. (An example is part of the M(ansible.builtin.shell) module documentation).
- If the command returns non UTF-8 data, it must be encoded to avoid issues. One option is to pipe
the output through C(base64).
seealso:
- module: ansible.builtin.script
- module: ansible.builtin.shell
author: "Matt Martz (@sivel)"
"""
EXAMPLES = r"""
- name: Case insensitive password string match
ansible.builtin.expect:
command: passwd username
responses:
(?i)password: "MySekretPa$$word"
# you don't want to show passwords in your logs
no_log: true
- name: Match multiple regular expressions and demonstrate individual and repeated responses
ansible.builtin.expect:
command: /path/to/custom/command
responses:
Question:
# give a unique response for each of the 3 hypothetical prompts matched
- response1
- response2
- response3
# give the same response for every matching prompt
"^Match another prompt$": "response"
- name: Multiple questions with responses
ansible.builtin.expect:
command: /path/to/custom/command
responses:
"Please provide your name":
- "Anna"
"Database user":
- "{{ db_username }}"
"Database password":
- "{{ db_password }}"
"""
import datetime
import os
import traceback
PEXPECT_IMP_ERR = None
try:
import pexpect
HAS_PEXPECT = True
except ImportError:
PEXPECT_IMP_ERR = traceback.format_exc()
HAS_PEXPECT = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.common.validation import check_type_int
def response_closure(module, question, responses):
resp_gen = (b'%s\n' % to_bytes(r).rstrip(b'\n') for r in responses)
def wrapped(info):
try:
return next(resp_gen)
except StopIteration:
module.fail_json(msg="No remaining responses for '%s', "
"output was '%s'" %
(question,
info['child_result_list'][-1]))
return wrapped
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=True),
chdir=dict(type='path'),
creates=dict(type='path'),
removes=dict(type='path'),
responses=dict(type='dict', required=True),
timeout=dict(type='raw', default=30),
echo=dict(type='bool', default=False),
)
)
if not HAS_PEXPECT:
module.fail_json(msg=missing_required_lib("pexpect"),
exception=PEXPECT_IMP_ERR)
chdir = module.params['chdir']
args = module.params['command']
creates = module.params['creates']
removes = module.params['removes']
responses = module.params['responses']
timeout = module.params['timeout']
if timeout is not None:
try:
timeout = check_type_int(timeout)
except TypeError as te:
module.fail_json(msg=f"argument 'timeout' is of type {type(timeout)} and we were unable to convert to int: {te}")
echo = module.params['echo']
events = dict()
for key, value in responses.items():
if isinstance(value, list):
response = response_closure(module, key, value)
else:
response = b'%s\n' % to_bytes(value).rstrip(b'\n')
events[to_bytes(key)] = response
if args.strip() == '':
module.fail_json(rc=256, msg="no command given")
if chdir:
chdir = os.path.abspath(chdir)
os.chdir(chdir)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if os.path.exists(creates):
module.exit_json(
cmd=args,
stdout="skipped, since %s exists" % creates,
changed=False,
rc=0
)
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not os.path.exists(removes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % removes,
changed=False,
rc=0
)
startd = datetime.datetime.now()
try:
try:
# Prefer pexpect.run from pexpect>=4
b_out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
events=events, cwd=chdir, echo=echo,
encoding=None)
except TypeError:
# Use pexpect._run in pexpect>=3.3,<4
# pexpect.run doesn't support `echo`
# pexpect.runu doesn't support encoding=None
b_out, rc = pexpect._run(args, timeout=timeout, withexitstatus=True,
events=events, extra_args=None, logfile=None,
cwd=chdir, env=None, _spawn=pexpect.spawn,
echo=echo)
except (TypeError, AttributeError) as e:
# This should catch all insufficient versions of pexpect
# We deem them insufficient for their lack of ability to specify
# to not echo responses via the run/runu functions, which would
# potentially leak sensitive information
module.fail_json(msg='Insufficient version of pexpect installed '
'(%s), this module requires pexpect>=3.3. '
'Error was %s' % (pexpect.__version__, to_native(e)))
except pexpect.ExceptionPexpect as e:
module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())
endd = datetime.datetime.now()
delta = endd - startd
if b_out is None:
b_out = b''
result = dict(
cmd=args,
stdout=to_native(b_out).rstrip('\r\n'),
rc=rc,
start=str(startd),
end=str(endd),
delta=str(delta),
changed=True,
)
if rc is None:
module.fail_json(msg='command exceeded timeout', **result)
elif rc != 0:
module.fail_json(msg='non-zero return code', **result)
module.exit_json(**result)
if __name__ == '__main__':
main()
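Aside: the note above about start-of-line matching can be verified directly; without the (?m) flag, ^ only anchors at the start of the whole buffer:

import re

buffer = 'Welcome banner\nPassword: '
print(bool(re.search(r'^Password:', buffer)))      # False: ^ anchors at buffer start only
print(bool(re.search(r'(?m)^Password:', buffer)))  # True: (?m) lets ^ match at each line start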
| 9,369 | Python | .py | 242 | 30.739669 | 125 | 0.630815 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,137 | user.py | ansible_ansible/lib/ansible/modules/user.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
module: user
version_added: "0.2"
short_description: Manage user accounts
description:
- Manage user accounts and user attributes.
- For Windows targets, use the M(ansible.windows.win_user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
type: str
required: true
aliases: [ user ]
uid:
description:
- Optionally sets the I(UID) of the user.
type: int
comment:
description:
- Optionally sets the description (aka I(GECOS)) of user account.
- On macOS, this defaults to the O(name) option.
type: str
hidden:
description:
- macOS only, optionally hide the user from the login window and system preferences.
- The default will be V(true) if the O(system) option is used.
type: bool
version_added: "2.6"
non_unique:
description:
- Optionally, when used with the C(-u) option, this allows changing the user ID to a non-unique value.
type: bool
default: no
version_added: "1.1"
seuser:
description:
- Optionally sets the C(seuser) type C(user_u) on SELinux enabled systems.
type: str
version_added: "2.1"
group:
description:
- Optionally sets the user's primary group (takes a group name).
- On macOS, this defaults to V(staff).
type: str
groups:
description:
- A list of supplementary groups which the user is also a member of.
- By default, the user is removed from all other groups. Configure O(append) to modify this.
- When set to an empty string V(''),
the user is removed from all groups except the primary group.
- Before Ansible 2.3, the only input format allowed was a comma separated string.
type: list
elements: str
append:
description:
- If V(true), add the user to the groups specified in O(groups).
- If V(false), user will only be added to the groups specified in O(groups),
removing them from all other groups.
type: bool
default: no
shell:
description:
- Optionally set the user's shell.
- On macOS, before Ansible 2.5, the default shell for non-system users was V(/usr/bin/false).
Since Ansible 2.5, the default shell for non-system users on macOS is V(/bin/bash).
- On other operating systems, the default shell is determined by the underlying tool
invoked by this module. See Notes for a per platform list of invoked tools.
- As of Ansible 2.18, the type has changed from I(str) to I(path).
type: path
home:
description:
- Optionally set the user's home directory.
type: path
skeleton:
description:
- Optionally set a home skeleton directory.
- Requires O(create_home) option!
type: str
version_added: "2.0"
password:
description:
- If provided, set the user's password to the provided encrypted hash (Linux) or plain text password (macOS).
- B(Linux/Unix/POSIX:) Enter the hashed password as the value.
- See L(FAQ entry,https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module)
for details on various ways to generate the hash of a password.
- To create an account with a locked/disabled password on Linux systems, set this to V('!') or V('*').
- To create an account with a locked/disabled password on OpenBSD, set this to V('*************').
- B(OS X/macOS:) Enter the cleartext password as the value. Be sure to take relevant security precautions.
- On macOS, the password specified in the C(password) option will always be set, regardless of whether the user account already exists or not.
- When the password is passed as an argument, the M(ansible.builtin.user) module will always report C(changed) as C(true) for macOS systems,
since macOS no longer provides access to the hashed passwords directly.
type: str
state:
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
- See this L(FAQ entry,https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#running-on-macos-as-a-target)
for additional requirements when removing users on macOS systems.
type: str
choices: [ absent, present ]
default: present
create_home:
description:
- Unless set to V(false), a home directory will be made for the user
when the account is created or if the home directory does not exist.
- Changed from O(createhome) to O(create_home) in Ansible 2.5.
type: bool
default: yes
aliases: [ createhome ]
move_home:
description:
- "If set to V(true) when used with O(home), attempt to move the user's old home
directory to the specified directory if it isn't there already and the old home exists."
type: bool
default: no
system:
description:
- When creating an account O(state=present), setting this to V(true) makes the user a system account.
- This setting cannot be changed on existing users.
type: bool
default: no
force:
description:
- This only affects O(state=absent), it forces removal of the user and associated directories on supported platforms.
- The behavior is the same as C(userdel --force), check the man page for C(userdel) on your system for details and support.
- When used with O(generate_ssh_key=yes) this forces an existing key to be overwritten.
type: bool
default: no
remove:
description:
- This only affects O(state=absent), it attempts to remove directories associated with the user.
- The behavior is the same as C(userdel --remove), check the man page for details and support.
type: bool
default: no
login_class:
description:
- Optionally sets the user's login class, a feature of most BSD OSs.
type: str
generate_ssh_key:
description:
- Whether to generate a SSH key for the user in question.
- This will B(not) overwrite an existing SSH key unless used with O(force=yes).
type: bool
default: no
version_added: "0.9"
ssh_key_bits:
description:
- Optionally specify number of bits in SSH key to create.
- The default value depends on C(ssh-keygen).
type: int
version_added: "0.9"
ssh_key_type:
description:
- Optionally specify the type of SSH key to generate.
- Available SSH key types will depend on implementation
present on target host.
type: str
default: rsa
version_added: "0.9"
ssh_key_file:
description:
- Optionally specify the SSH key filename.
- If this is a relative filename then it will be relative to the user's home directory.
- This parameter defaults to V(.ssh/id_rsa).
type: path
version_added: "0.9"
ssh_key_comment:
description:
- Optionally define the comment for the SSH key.
type: str
default: ansible-generated on $HOSTNAME
version_added: "0.9"
ssh_key_passphrase:
description:
- Set a passphrase for the SSH key.
- If no passphrase is provided, the SSH key will default to having no passphrase.
type: str
version_added: "0.9"
update_password:
description:
- V(always) will update passwords if they differ.
- V(on_create) will only set the password for newly created users.
type: str
choices: [ always, on_create ]
default: always
version_added: "1.3"
expires:
description:
- An expiry time for the user in epoch, it will be ignored on platforms that do not support this.
- Currently supported on GNU/Linux, FreeBSD, and DragonFlyBSD.
- Since Ansible 2.6 you can remove the expiry time by specifying a negative value.
Currently supported on GNU/Linux and FreeBSD.
type: float
version_added: "1.9"
password_lock:
description:
- Lock the password (C(usermod -L), C(usermod -U), C(pw lock)).
- Implementation differs by platform. This option does not always mean the user cannot login using other methods.
- This option does not disable the user, only lock the password.
- This must be set to V(false) in order to unlock a currently locked password. The absence of this parameter will not unlock a password.
- Currently supported on Linux, FreeBSD, DragonFlyBSD, NetBSD, OpenBSD.
type: bool
version_added: "2.6"
local:
description:
- Forces the use of "local" command alternatives on platforms that implement it.
- This is useful in environments that use centralized authentication when you want to manipulate the local users
(in other words, it uses C(luseradd) instead of C(useradd)).
- This will check C(/etc/passwd) for an existing account before invoking commands. If the local account database
exists somewhere other than C(/etc/passwd), this setting will not work properly.
- This requires that the above commands, as well as C(/etc/passwd), exist on the target host; otherwise it will be a fatal error.
type: bool
default: no
version_added: "2.4"
profile:
description:
- Sets the profile of the user.
- Can set multiple profiles using comma separation.
- To delete all the profiles, use O(profile='').
- Currently supported on Illumos/Solaris. Does nothing when used with other platforms.
type: str
version_added: "2.8"
authorization:
description:
- Sets the authorization of the user.
- Can set multiple authorizations using comma separation.
- To delete all authorizations, use O(authorization='').
- Currently supported on Illumos/Solaris. Does nothing when used with other platforms.
type: str
version_added: "2.8"
role:
description:
- Sets the role of the user.
- Can set multiple roles using comma separation.
- To delete all roles, use O(role='').
- Currently supported on Illumos/Solaris. Does nothing when used with other platforms.
type: str
version_added: "2.8"
password_expire_max:
description:
- Maximum number of days between password change.
- Supported on Linux only.
type: int
version_added: "2.11"
password_expire_min:
description:
- Minimum number of days between password change.
- Supported on Linux only.
type: int
version_added: "2.11"
password_expire_warn:
description:
- Number of days of warning before password expires.
- Supported on Linux only.
type: int
version_added: "2.16"
umask:
description:
- Sets the umask of the user.
- Currently supported on Linux. Does nothing when used with other platforms.
- Requires O(local) is omitted or V(false).
type: str
version_added: "2.12"
password_expire_account_disable:
description:
- Number of days after a password expires until the account is disabled.
- Currently supported on AIX, Linux, NetBSD, OpenBSD.
type: int
version_added: "2.18"
uid_min:
description:
- Sets the UID_MIN value for user creation.
- Overwrites /etc/login.defs default value.
- Currently supported on Linux. Does nothing when used with other platforms.
- Requires O(local) is omitted or V(False).
type: int
version_added: "2.18"
uid_max:
description:
- Sets the UID_MAX value for user creation.
- Overwrites /etc/login.defs default value.
- Currently supported on Linux. Does nothing when used with other platforms.
- Requires O(local) is omitted or V(False).
type: int
version_added: "2.18"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
notes:
- There are specific requirements per platform on user management utilities. However
they generally come pre-installed with the system and Ansible will require they
are present at runtime. If they are not, a descriptive error message will be shown.
- On SunOS platforms, the shadow file is backed up automatically since this module edits it directly.
On other platforms, the shadow file is backed up by the underlying tools used by this module.
- On macOS, this module uses C(dscl) to create, modify, and delete accounts. C(dseditgroup) is used to
modify group membership. Accounts are hidden from the login window by modifying
C(/Library/Preferences/com.apple.loginwindow.plist).
- On FreeBSD, this module uses C(pw useradd) and C(chpass) to create, C(pw usermod) and C(chpass) to modify,
C(pw userdel) to remove, C(pw lock) to lock, and C(pw unlock) to unlock accounts.
- On all other platforms, this module uses C(useradd) to create, C(usermod) to modify, and
C(userdel) to remove accounts.
seealso:
- module: ansible.posix.authorized_key
- module: ansible.builtin.group
- module: ansible.windows.win_user
author:
- Stephen Fromm (@sfromm)
"""
EXAMPLES = r"""
- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
ansible.builtin.user:
name: johnd
comment: John Doe
uid: 1040
group: admin
- name: Create a user 'johnd' with a home directory
ansible.builtin.user:
name: johnd
create_home: yes
- name: Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
ansible.builtin.user:
name: james
shell: /bin/bash
groups: admins,developers
append: yes
- name: Remove the user 'johnd'
ansible.builtin.user:
name: johnd
state: absent
remove: yes
- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
ansible.builtin.user:
name: jsmith
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_file: .ssh/id_rsa
- name: Added a consultant whose account you want to expire
ansible.builtin.user:
name: james18
shell: /bin/zsh
groups: developers
expires: 1422403387
- name: Starting at Ansible 2.6, modify user, remove expiry time
ansible.builtin.user:
name: james18
expires: -1
- name: Set maximum expiration date for password
ansible.builtin.user:
name: ram19
password_expire_max: 10
- name: Set minimum expiration date for password
ansible.builtin.user:
name: pushkar15
password_expire_min: 5
- name: Set number of warning days for password expiration
ansible.builtin.user:
name: jane157
password_expire_warn: 30
- name: Set number of days after password expires until account is disabled
ansible.builtin.user:
name: jimholden2016
password_expire_account_disable: 15
"""
RETURN = r"""
append:
description: Whether or not to append the user to groups.
returned: When O(state) is V(present) and the user exists
type: bool
sample: True
comment:
description: Comment section from passwd file, usually the user name.
returned: When user exists
type: str
sample: Agent Smith
create_home:
description: Whether or not to create the home directory.
returned: When user does not exist and not check mode
type: bool
sample: True
force:
description: Whether or not a user account was forcibly deleted.
returned: When O(state) is V(absent) and user exists
type: bool
sample: False
group:
description: Primary user group ID
returned: When user exists
type: int
sample: 1001
groups:
description: List of groups of which the user is a member.
returned: When O(groups) is not empty and O(state) is V(present)
type: str
sample: 'chrony,apache'
home:
description: "Path to user's home directory."
returned: When O(state) is V(present)
type: str
sample: '/home/asmith'
move_home:
description: Whether or not to move an existing home directory.
returned: When O(state) is V(present) and user exists
type: bool
sample: False
name:
description: User account name.
returned: always
type: str
sample: asmith
password:
description: Masked value of the password.
returned: When O(state) is V(present) and O(password) is not empty
type: str
sample: 'NOT_LOGGING_PASSWORD'
remove:
description: Whether or not to remove the user account.
returned: When O(state) is V(absent) and user exists
type: bool
sample: True
shell:
description: User login shell.
returned: When O(state) is V(present)
type: str
sample: '/bin/bash'
ssh_fingerprint:
description: Fingerprint of generated SSH key.
returned: When O(generate_ssh_key) is V(True)
type: str
sample: '2048 SHA256:aYNHYcyVm87Igh0IMEDMbvW0QDlRQfE0aJugp684ko8 ansible-generated on host (RSA)'
ssh_key_file:
description: Path to generated SSH private key file.
returned: When O(generate_ssh_key) is V(True)
type: str
sample: /home/asmith/.ssh/id_rsa
ssh_public_key:
description: Generated SSH public key file.
returned: When O(generate_ssh_key) is V(True)
type: str
sample: >
'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC95opt4SPEC06tOYsJQJIuN23BbLMGmYo8ysVZQc4h2DZE9ugbjWWGS1/pweUGjVstgzMkBEeBCByaEf/RJKNecKRPeGd2Bw9DCj/bn5Z6rGfNENKBmo
618mUJBvdlEgea96QGjOwSB7/gmonduC7gsWDMNcOdSE3wJMTim4lddiBx4RgC9yXsJ6Tkz9BHD73MXPpT5ETnse+A3fw3IGVSjaueVnlUyUmOBf7fzmZbhlFVXf2Zi2rFTXqvbdGHKkzpw1U8eB8xFPP7y
d5u1u0e6Acju/8aZ/l17IDFiLke5IzlqIMRTEbDwLNeO84YQKWTm9fODHzhYe0yvxqLiK07 ansible-generated on host'
stderr:
description: Standard error from running commands.
returned: When stderr is returned by a command that is run
type: str
sample: Group wheels does not exist
stdout:
description: Standard output from running commands.
returned: When standard output is returned by the command that is run
type: str
sample:
system:
description: Whether or not the account is a system account.
returned: When O(system) is passed to the module and the account does not exist
type: bool
sample: True
uid:
description: User ID of the user account.
returned: When O(uid) is passed to the module
type: int
sample: 1044
"""
import ctypes.util
import grp
import calendar
import os
import re
import pty
import pwd
import select
import shutil
import socket
import subprocess
import time
import math
from ansible.module_utils import distro
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.sys_info import get_platform_subclass
import ansible.module_utils.compat.typing as t
class StructSpwdType(ctypes.Structure):
_fields_ = [
('sp_namp', ctypes.c_char_p),
('sp_pwdp', ctypes.c_char_p),
('sp_lstchg', ctypes.c_long),
('sp_min', ctypes.c_long),
('sp_max', ctypes.c_long),
('sp_warn', ctypes.c_long),
('sp_inact', ctypes.c_long),
('sp_expire', ctypes.c_long),
('sp_flag', ctypes.c_ulong),
]
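# Load getspnam(3) through ctypes so shadow entries can be read without the
# spwd module, which was removed from the Python standard library in 3.13.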
try:
_LIBC = ctypes.cdll.LoadLibrary(
t.cast(
str,
ctypes.util.find_library('c')
)
)
_LIBC.getspnam.argtypes = (ctypes.c_char_p,)
_LIBC.getspnam.restype = ctypes.POINTER(StructSpwdType)
HAVE_SPWD = True
except AttributeError:
HAVE_SPWD = False
_HASH_RE = re.compile(r'[^a-zA-Z0-9./=]')
def getspnam(b_name):
return _LIBC.getspnam(b_name).contents
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None # type: str | None
PASSWORDFILE = '/etc/passwd'
SHADOWFILE = '/etc/shadow' # type: str | None
SHADOWFILE_EXPIRE_INDEX = 7
LOGIN_DEFS = '/etc/login.defs'
DATE_FORMAT = '%Y-%m-%d'
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(User)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.hidden = module.params['hidden']
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
self.comment = module.params['comment']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.create_home = module.params['create_home']
self.move_home = module.params['move_home']
self.skeleton = module.params['skeleton']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.home = module.params['home']
self.expires = None
self.password_lock = module.params['password_lock']
self.groups = None
self.local = module.params['local']
self.profile = module.params['profile']
self.authorization = module.params['authorization']
self.role = module.params['role']
self.password_expire_max = module.params['password_expire_max']
self.password_expire_min = module.params['password_expire_min']
self.password_expire_warn = module.params['password_expire_warn']
self.umask = module.params['umask']
self.inactive = module.params['password_expire_account_disable']
self.uid_min = module.params['uid_min']
self.uid_max = module.params['uid_max']
if self.local:
if self.umask is not None:
module.fail_json(msg="'umask' can not be used with 'local'")
if self.uid_min is not None:
module.fail_json(msg="'uid_min' can not be used with 'local'")
if self.uid_max is not None:
module.fail_json(msg="'uid_max' can not be used with 'local'")
if module.params['groups'] is not None:
self.groups = ','.join(module.params['groups'])
if module.params['expires'] is not None:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception as e:
module.fail_json(msg="Invalid value for 'expires' %s: %s" % (self.expires, to_native(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
if self.groups is None and self.append:
# Change the argument_spec in 2.14 and remove this warning
# required_by={'append': ['groups']}
module.warn("'append' is set, but no 'groups' are specified. Use 'groups' for appending new groups. "
"This will change to an error in Ansible 2.14.")
def check_password_encrypted(self):
# Darwin needs cleartext password, so skip validation
if self.module.params['password'] and self.platform != 'Darwin':
maybe_invalid = False
# Allow setting certain passwords in order to disable the account
if self.module.params['password'] in set(['*', '!', '*************']):
maybe_invalid = False
else:
# : for delimiter, * for disable user, ! for lock user
# these characters are invalid in the password
if any(char in self.module.params['password'] for char in ':*!'):
maybe_invalid = True
if '$' not in self.module.params['password']:
maybe_invalid = True
else:
fields = self.module.params['password'].split("$")
if len(fields) >= 3:
# contains character outside the crypto constraint
if bool(_HASH_RE.search(fields[-1])):
maybe_invalid = True
# md5
if fields[1] == '1' and len(fields[-1]) != 22:
maybe_invalid = True
# sha256
if fields[1] == '5' and len(fields[-1]) != 43:
maybe_invalid = True
# sha512
if fields[1] == '6' and len(fields[-1]) != 86:
maybe_invalid = True
# yescrypt
if fields[1] == 'y' and len(fields[-1]) != 43:
maybe_invalid = True
else:
maybe_invalid = True
if maybe_invalid:
self.module.warn("The input password appears not to have been hashed. "
"The 'password' argument must be encrypted for this module to work properly.")
def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
if self.module.check_mode and obey_checkmode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
else:
# cast all args to strings ansible-modules-core/issues/4397
cmd = [str(x) for x in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def backup_shadow(self):
if not self.module.check_mode and self.SHADOWFILE:
return self.module.backup_local(self.SHADOWFILE)
def remove_user_userdel(self):
if self.local:
command_name = 'luserdel'
else:
command_name = 'userdel'
cmd = [self.module.get_bin_path(command_name, True)]
if self.force and not self.local:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self):
if self.local:
command_name = 'luseradd'
lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
lchage_cmd = self.module.get_bin_path('lchage', True)
else:
command_name = 'useradd'
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.seuser is not None:
cmd.append('-Z')
cmd.append(self.seuser)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
elif self.group_exists(self.name):
# use the -N option (no user group) if a group already
# exists with the same name as the user to prevent
# errors from useradd trying to create a group when
# USERGROUPS_ENAB is set in /etc/login.defs.
if self.local:
# luseradd uses -n instead of -N
cmd.append('-n')
else:
if os.path.exists('/etc/redhat-release'):
dist = distro.version()
major_release = int(dist.split('.')[0])
if major_release <= 5:
cmd.append('-n')
else:
cmd.append('-N')
elif os.path.exists('/etc/SuSE-release'):
# -N did not exist in useradd before SLE 11 and did not
# automatically create a group
dist = distro.version()
major_release = int(dist.split('.')[0])
if major_release >= 12:
cmd.append('-N')
else:
cmd.append('-N')
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
if not self.local:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
            # If the specified path to the user home contains parent directories
            # that do not exist and create_home is True, first create the home
            # directory (and its parents), since useradd cannot do so itself.
if self.create_home:
parent = os.path.dirname(self.home)
if not os.path.isdir(parent):
self.create_homedir(self.home)
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.expires is not None and not self.local:
cmd.append('-e')
if self.expires < time.gmtime(0):
cmd.append('')
else:
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.inactive is not None:
cmd.append('-f')
cmd.append(int(self.inactive))
if self.password is not None:
cmd.append('-p')
if self.password_lock:
cmd.append('!%s' % self.password)
else:
cmd.append(self.password)
if self.create_home:
if not self.local:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if not self.local or rc != 0:
return (rc, out, err)
if self.expires is not None:
if self.expires < time.gmtime(0):
lexpires = -1
else:
# Convert seconds since Epoch to days since Epoch
lexpires = int(math.floor(self.module.params['expires'])) // 86400
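                # e.g. expires=1735689600 (2025-01-01T00:00:00Z) -> day 20089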
(rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
if self.groups is None or len(self.groups) == 0:
return (rc, out, err)
for add_group in groups:
(rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
return (rc, out, err)
def _check_usermod_append(self):
# check if this version of usermod can append groups
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
usermod_path = self.module.get_bin_path(command_name, True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path, '--help']
(rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
helpout = data1 + data2
# check if --append exists
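        # the relevant help line typically reads
        #   '-a, --append    append the user to the supplemental GROUPS ...'
        # so a prefix match on the stripped line is enough to detect support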
lines = to_native(helpout).split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
if self.local:
command_name = 'lusermod'
lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
lgroupmod_add = set()
lgroupmod_del = set()
lchage_cmd = self.module.get_bin_path('lchage', True)
lexpires = None
else:
command_name = 'usermod'
cmd = [self.module.get_bin_path(command_name, True)]
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(ginfo[2])
if self.groups is not None:
# get a list of all groups for the user, including the primary
current_groups = self.user_group_membership(exclude_primary=False)
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False, names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.local:
if self.append:
lgroupmod_add = set(groups).difference(current_groups)
lgroupmod_del = set()
else:
lgroupmod_add = set(groups).difference(current_groups)
lgroupmod_del = set(current_groups).difference(groups)
else:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.expires is not None:
current_expires = self.user_password()[1] or '0'
current_expires = int(current_expires)
if self.expires < time.gmtime(0):
if current_expires >= 0:
if self.local:
lexpires = -1
else:
cmd.append('-e')
cmd.append('')
else:
# Convert days since Epoch to seconds since Epoch as struct_time
current_expire_date = time.gmtime(current_expires * 86400)
# Current expires is negative or we compare year, month, and day only
if current_expires < 0 or current_expire_date[:3] != self.expires[:3]:
if self.local:
# Convert seconds since Epoch to days since Epoch
lexpires = int(math.floor(self.module.params['expires'])) // 86400
else:
cmd.append('-e')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
# Lock if no password or unlocked, unlock only if locked
if self.password_lock and not info[1].startswith('!'):
cmd.append('-L')
elif self.password_lock is False and info[1].startswith('!'):
# usermod will refuse to unlock a user with no password, module shows 'changed' regardless
cmd.append('-U')
if self.update_password == 'always' and self.password is not None and info[1].lstrip('!') != self.password.lstrip('!'):
# Remove options that are mutually exclusive with -p
cmd = [c for c in cmd if c not in ['-U', '-L']]
cmd.append('-p')
if self.password_lock:
# Lock the account and set the hash in a single command
cmd.append('!%s' % self.password)
else:
cmd.append(self.password)
(rc, out, err) = (None, '', '')
# skip if no usermod changes to be made
if len(cmd) > 1:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if not self.local or not (rc is None or rc == 0):
return (rc, out, err)
if lexpires is not None:
(rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
if len(lgroupmod_add) == 0 and len(lgroupmod_del) == 0:
return (rc, out, err)
for add_group in lgroupmod_add:
(rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
for del_group in lgroupmod_del:
(rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-m', self.name, del_group])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
return (rc, out, err)
def group_exists(self, group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True, names_only=False):
if self.groups is None:
return None
info = self.user_info()
groups = set(x.strip() for x in self.groups.split(',') if x)
group_names = set()
for g in groups.copy():
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
group_info = self.group_info(g)
if info and remove_existing and group_info[2] == info[3]:
groups.remove(g)
elif names_only:
group_names.add(group_info[0])
if names_only:
return group_names
return groups
def user_group_membership(self, exclude_primary=True):
""" Return a list of groups the user belongs to """
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem:
# Exclude the user's primary group by default
if not exclude_primary:
groups.append(group[0])
else:
if info[3] != group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
        # The pwd module does not distinguish between local and directory accounts,
        # so its output cannot be used to determine whether an account exists
        # locally: it reports the account whether it lives locally or in the
        # directory. Instead, look in the local password file for the account.
if self.local:
if not os.path.exists(self.PASSWORDFILE):
self.module.fail_json(msg="'local: true' specified but unable to find local account file {0} to parse.".format(self.PASSWORDFILE))
exists = False
name_test = '{0}:'.format(self.name)
with open(self.PASSWORDFILE, 'rb') as f:
reversed_lines = f.readlines()[::-1]
for line in reversed_lines:
if line.startswith(to_bytes(name_test)):
exists = True
break
return exists
else:
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()[0]
return info
def set_password_expire(self):
min_needs_change = self.password_expire_min is not None
max_needs_change = self.password_expire_max is not None
warn_needs_change = self.password_expire_warn is not None
if HAVE_SPWD:
try:
shadow_info = getspnam(to_bytes(self.name))
except ValueError:
return None, '', ''
min_needs_change &= self.password_expire_min != shadow_info.sp_min
max_needs_change &= self.password_expire_max != shadow_info.sp_max
warn_needs_change &= self.password_expire_warn != shadow_info.sp_warn
if not (min_needs_change or max_needs_change or warn_needs_change):
return (None, '', '') # target state already reached
command_name = 'chage'
cmd = [self.module.get_bin_path(command_name, True)]
if min_needs_change:
cmd.extend(["-m", self.password_expire_min])
if max_needs_change:
cmd.extend(["-M", self.password_expire_max])
if warn_needs_change:
cmd.extend(["-W", self.password_expire_warn])
cmd.append(self.name)
return self.execute_command(cmd)
def user_password(self):
passwd = ''
expires = ''
if HAVE_SPWD:
try:
shadow_info = getspnam(to_bytes(self.name))
passwd = to_native(shadow_info.sp_pwdp)
expires = shadow_info.sp_expire
return passwd, expires
except ValueError:
return passwd, expires
if not self.user_exists():
return passwd, expires
elif self.SHADOWFILE:
passwd, expires = self.parse_shadow_file()
return passwd, expires
def parse_shadow_file(self):
passwd = ''
expires = ''
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'r') as f:
for line in f:
if line.startswith('%s:' % self.name):
passwd = line.split(':')[1]
expires = line.split(':')[self.SHADOWFILE_EXPIRE_INDEX] or -1
return passwd, expires
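    # For reference, a typical shadow entry looks like
    #   jdoe:$6$salt$hash:19300:0:99999:7:::
    # with the expiry column selected by SHADOWFILE_EXPIRE_INDEX (overridden
    # by some platform subclasses below); an empty field falls back to -1.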
def get_ssh_key_path(self):
info = self.user_info()
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
if not os.path.exists(info[5]) and not self.module.check_mode:
raise Exception('User %s home directory does not exist' % self.name)
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
def ssh_key_gen(self):
info = self.user_info()
overwrite = None
try:
ssh_key_file = self.get_ssh_key_path()
pub_file = f'{ssh_key_file}.pub'
except Exception as e:
return (1, '', to_native(e))
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
if self.module.check_mode:
return (0, '', '')
try:
os.mkdir(ssh_dir, int('0700', 8))
os.chown(ssh_dir, info[2], info[3])
except OSError as e:
return (1, '', 'Failed to create %s: %s' % (ssh_dir, to_native(e)))
if os.path.exists(ssh_key_file):
if self.force:
self.module.warn(f'Overwriting existing ssh key private file "{ssh_key_file}"')
overwrite = 'y'
else:
self.module.warn(f'Found existing ssh key private file "{ssh_key_file}", no force, so skipping ssh-keygen generation')
return (None, 'Key already exists, use "force: yes" to overwrite', '')
if os.path.exists(pub_file):
if self.force:
self.module.warn(f'Overwriting existing ssh key public file "{pub_file}"')
os.unlink(pub_file)
else:
self.module.warn(f'Found existing ssh key public file "{pub_file}", no force, so skipping ssh-keygen generation')
return (None, 'Public key already exists, use "force: yes" to overwrite', '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-t')
cmd.append(self.ssh_type)
if self.ssh_bits > 0:
cmd.append('-b')
cmd.append(self.ssh_bits)
cmd.append('-C')
cmd.append(self.ssh_comment)
cmd.append('-f')
cmd.append(ssh_key_file)
if self.ssh_passphrase is not None:
if self.module.check_mode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
master_in_fd, slave_in_fd = pty.openpty()
master_out_fd, slave_out_fd = pty.openpty()
master_err_fd, slave_err_fd = pty.openpty()
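            # ssh-keygen reads the passphrase from a terminal rather than from
            # stdin, so plain pipes will not work; pseudo-terminals let us
            # detect and answer its prompts below.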
env = os.environ.copy()
env['LC_ALL'] = get_best_parsable_locale(self.module)
try:
p = subprocess.Popen([to_bytes(c) for c in cmd],
stdin=slave_in_fd,
stdout=slave_out_fd,
stderr=slave_err_fd,
preexec_fn=os.setsid,
env=env)
out_buffer = b''
err_buffer = b''
while p.poll() is None:
r_list = select.select([master_out_fd, master_err_fd], [], [], 1)[0]
first_prompt = b'Enter passphrase (empty for no passphrase):'
second_prompt = b'Enter same passphrase again'
prompt = first_prompt
for fd in r_list:
if fd == master_out_fd:
chunk = os.read(master_out_fd, 10240)
out_buffer += chunk
if prompt in out_buffer:
os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
prompt = second_prompt
else:
chunk = os.read(master_err_fd, 10240)
err_buffer += chunk
if prompt in err_buffer:
os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
prompt = second_prompt
if b'Overwrite (y/n)?' in out_buffer or b'Overwrite (y/n)?' in err_buffer:
# The key was created between us checking for existence and now
return (None, 'Key already exists', '')
rc = p.returncode
out = to_native(out_buffer)
err = to_native(err_buffer)
except OSError as e:
return (1, '', to_native(e))
else:
cmd.append('-N')
cmd.append('')
(rc, out, err) = self.execute_command(cmd, data=overwrite)
if rc == 0 and not self.module.check_mode:
# If the keys were successfully created, we should be able
# to tweak ownership.
os.chown(ssh_key_file, info[2], info[3])
os.chown(pub_file, info[2], info[3])
return (rc, out, err)
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd, obey_checkmode=False)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
with open(ssh_public_key_file, 'r') as f:
ssh_public_key = f.read().strip()
except IOError:
return None
return ssh_public_key
def create_user(self):
# by default we use the create_user_useradd method
return self.create_user_useradd()
def remove_user(self):
# by default we use the remove_user_userdel method
return self.remove_user_userdel()
def modify_user(self):
# by default we use the modify_user_usermod method
return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
if self.skeleton is not None:
skeleton = self.skeleton
else:
skeleton = '/etc/skel'
if os.path.exists(skeleton) and skeleton != os.devnull:
try:
shutil.copytree(skeleton, path, symlinks=True)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
else:
try:
os.makedirs(path)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# get umask from /etc/login.defs and set correct home mode
if os.path.exists(self.LOGIN_DEFS):
with open(self.LOGIN_DEFS, 'r') as f:
for line in f:
m = re.match(r'^UMASK\s+(\d+)$', line)
if m:
umask = int(m.group(1), 8)
mode = 0o777 & ~umask
try:
os.chmod(path, mode)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
full_path = os.path.join(root, f)
if not os.path.islink(full_path):
os.chown(full_path, uid, gid)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# ===========================================
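# Each subclass below sets 'platform' (and optionally 'distribution');
# get_platform_subclass() in User.__new__ picks the most specific matching
# class when the module instantiates User.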
class FreeBsdUser(User):
"""
This is a FreeBSD User manipulation class - it uses the pw command
to manipulate the user database, followed by the chpass command
to change the password.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'FreeBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
SHADOWFILE_EXPIRE_INDEX = 6
DATE_FORMAT = '%d-%b-%Y'
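    # e.g. time.strftime(DATE_FORMAT, ...) renders dates as '01-Jan-2025'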
def _handle_lock(self):
info = self.user_info()
if self.password_lock and not info[1].startswith('*LOCKED*'):
cmd = [
self.module.get_bin_path('pw', True),
'lock',
self.name
]
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
return self.execute_command(cmd)
elif self.password_lock is False and info[1].startswith('*LOCKED*'):
cmd = [
self.module.get_bin_path('pw', True),
'unlock',
self.name
]
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
return self.execute_command(cmd)
return (None, '', '')
def remove_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'userdel',
'-n',
self.name
]
if self.remove:
cmd.append('-r')
return self.execute_command(cmd)
def create_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'useradd',
'-n',
self.name,
]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.expires is not None:
cmd.append('-e')
if self.expires < time.gmtime(0):
cmd.append('0')
else:
cmd.append(str(calendar.timegm(self.expires)))
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
        # system cannot be handled currently - should we error if it's requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.password is not None:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
_rc, _out, _err = self.execute_command(cmd)
if rc is None:
rc = _rc
out += _out
err += _err
# we have to lock/unlock the password in a distinct command
_rc, _out, _err = self._handle_lock()
if rc is None:
rc = _rc
out += _out
err += _err
return (rc, out, err)
def modify_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
if (info[5] != self.home and self.move_home) or (not os.path.exists(self.home) and self.create_home):
cmd.append('-m')
if info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'r') as f:
for line in f:
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
            # act only if login_class changed
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set(names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires is not None:
current_expires = self.user_password()[1] or '0'
current_expires = int(current_expires)
# If expiration is negative or zero and the current expiration is greater than zero, disable expiration.
            # With pw(8), setting expiration to zero disables expiration. It does not expire the account.
if self.expires <= time.gmtime(0):
if current_expires > 0:
cmd.append('-e')
cmd.append('0')
else:
                # Convert seconds since Epoch to a struct_time (master.passwd stores expiry in seconds)
current_expire_date = time.gmtime(current_expires)
# Current expires is negative or we compare year, month, and day only
if current_expires <= 0 or current_expire_date[:3] != self.expires[:3]:
cmd.append('-e')
cmd.append(str(calendar.timegm(self.expires)))
(rc, out, err) = (None, '', '')
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, _out, _err) = self.execute_command(cmd)
out += _out
err += _err
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.update_password == 'always' and self.password is not None and info[1].lstrip('*LOCKED*') != self.password.lstrip('*LOCKED*'):
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
_rc, _out, _err = self.execute_command(cmd)
if rc is None:
rc = _rc
out += _out
err += _err
# we have to lock/unlock the password in a distinct command
_rc, _out, _err = self._handle_lock()
if rc is None:
rc = _rc
out += _out
err += _err
return (rc, out, err)
class DragonFlyBsdUser(FreeBsdUser):
"""
This is a DragonFlyBSD User manipulation class - it inherits the
FreeBsdUser class behaviors, such as using the pw command to
manipulate the user database, followed by the chpass command
to change the password.
"""
platform = 'DragonFly'
class OpenBSDUser(User):
"""
    This is an OpenBSD User manipulation class.
Main differences are that OpenBSD:-
- has no concept of "system" account.
    - has no force option when deleting a user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'OpenBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None and self.password != '*':
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups_option = '-S'
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_option = '-G'
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append(groups_option)
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.login_class is not None:
# find current login class
user_login_class = None
userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
(rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
for line in out.splitlines():
tokens = line.split()
if tokens[0] == 'class' and len(tokens) == 2:
user_login_class = tokens[1]
            # act only if login_class changed
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.password_lock and not info[1].startswith('*'):
cmd.append('-Z')
elif self.password_lock is False and info[1].startswith('*'):
cmd.append('-U')
if self.update_password == 'always' and self.password is not None \
and self.password != '*' and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class NetBSDUser(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
    - has no force option when deleting a user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
if self.password_lock and not info[1].startswith('*LOCKED*'):
cmd.append('-C yes')
elif self.password_lock is False and info[1].startswith('*LOCKED*'):
cmd.append('-C no')
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
- user_info()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
USER_ATTR = '/etc/user_attr'
def get_password_defaults(self):
# Read password aging defaults
try:
minweeks = ''
maxweeks = ''
warnweeks = ''
with open("/etc/default/passwd", 'r') as f:
for line in f:
line = line.strip()
if (line.startswith('#') or line == ''):
continue
m = re.match(r'^([^#]*)#(.*)$', line)
if m: # The line contains a hash / comment
line = m.group(1)
key, value = line.split('=')
if key == "MINWEEKS":
minweeks = value.rstrip('\n')
elif key == "MAXWEEKS":
maxweeks = value.rstrip('\n')
elif key == "WARNWEEKS":
warnweeks = value.rstrip('\n')
except Exception as err:
self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % to_native(err))
return (minweeks, maxweeks, warnweeks)
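    # /etc/default/passwd typically carries lines such as 'MAXWEEKS=13'; the
    # weekly values are multiplied by 7 wherever the shadow file is rewritten.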
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.profile is not None:
cmd.append('-P')
cmd.append(self.profile)
if self.authorization is not None:
cmd.append('-A')
cmd.append(self.authorization)
if self.role is not None:
cmd.append('-R')
cmd.append(self.role)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
if not self.module.check_mode:
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
self.backup_shadow()
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
with open(self.SHADOWFILE, 'rb') as f:
for line in f:
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
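                            # shadow layout: fields[1] is the password hash,
                            # fields[2] lastchg in days since the Epoch, and
                            # fields[3]-[5] the min/max/warn ages in days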
if minweeks:
try:
fields[3] = str(int(minweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
if maxweeks:
try:
fields[4] = str(int(maxweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
if warnweeks:
try:
fields[5] = str(int(warnweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
line = ':'.join(fields)
lines.append('%s\n' % line)
with open(self.SHADOWFILE, 'w+') as f:
f.writelines(lines)
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set(names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.profile is not None and info[7] != self.profile:
cmd.append('-P')
cmd.append(self.profile)
if self.authorization is not None and info[8] != self.authorization:
cmd.append('-A')
cmd.append(self.authorization)
if self.role is not None and info[9] != self.role:
cmd.append('-R')
cmd.append(self.role)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
self.backup_shadow()
(rc, out, err) = (0, '', '')
if not self.module.check_mode:
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
with open(self.SHADOWFILE, 'rb') as f:
for line in f:
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
fields[3] = str(int(minweeks) * 7)
if maxweeks:
fields[4] = str(int(maxweeks) * 7)
if warnweeks:
fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
with open(self.SHADOWFILE, 'w+') as f:
f.writelines(lines)
rc = 0
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
def user_info(self):
info = super(SunOS, self).user_info()
if info:
info += self._user_attr_info()
return info
def _user_attr_info(self):
info = [''] * 3
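        # /etc/user_attr entries look like
        #   jdoe::::profiles=System Administrator;roles=root
        # i.e. 'name::::key=value;key=value', which the loop below parses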
with open(self.USER_ATTR, 'r') as file_handler:
for line in file_handler:
lines = line.strip().split('::::')
if lines[0] == self.name:
tmp = dict(x.split('=') for x in lines[1].split(';'))
info[0] = tmp.get('profiles', '')
info[1] = tmp.get('auths', '')
info[2] = tmp.get('roles', '')
return info
class DarwinUser(User):
"""
This is a Darwin macOS User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
    - System users must be under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
('hidden', 'IsHidden'),
]
def __init__(self, module):
super(DarwinUser, self).__init__(module)
# make the user hidden if option is set or defer to system option
if self.hidden is None:
if self.system:
self.hidden = 1
elif self.hidden:
self.hidden = 1
else:
self.hidden = 0
        # 'hidden' is normalized above and already listed in self.fields, so
        # no extra field registration is needed here (appending again would
        # also mutate the class-level list shared by all instances)
def _get_dscl(self):
return [self.module.get_bin_path('dscl', True), self.dscl_directory]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += ['-search', '/Groups', 'GroupMembership', self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
"""Return user PROPERTY as given my dscl(1) read or None if not found."""
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, property]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
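        #   e.g. 'UserShell: /bin/bash' vs. 'RealName:\n John Q. Public'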
lines = out.splitlines()
if len(lines) == 1:
return lines[0].split(': ')[1]
if len(lines) > 2:
return '\n'.join([lines[1].strip()] + lines[2:])
if len(lines) == 2:
return lines[1].strip()
return None
def _get_next_uid(self, system=None):
"""
        Return the next available uid. If system=True, the
        uid should be below 500, if possible.
"""
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
max_system_uid = 0
for line in out.splitlines():
current_uid = int(line.split(' ')[-1])
if max_uid < current_uid:
max_uid = current_uid
if max_system_uid < current_uid and current_uid < 500:
max_system_uid = current_uid
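        # e.g. with UniqueIDs {1, 200, 501}: system=True returns 201 (next
        # free id below 500), while a regular account would get 502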
if system and (0 < max_system_uid < 499):
return max_system_uid + 1
return max_uid + 1
def _change_user_password(self):
"""Change password for SELF.NAME against SELF.PASSWORD.
Please note that password must be cleartext.
"""
        # some documentation on how passwords are stored on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += ['-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
"""Convert SELF.GROUP to is stringed numerical value suitable for dscl."""
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
"""Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. """
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group), err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
"""Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. """
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = self.get_groups_set(names_only=True)
else:
target = set([])
if self.append is False:
for remove in current - target:
(_rc, _out, _err) = self.__modify_group(remove, 'delete')
                rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _out, _err) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, out, err, changed)
def _update_system_user(self):
"""Hide or show user on login window according SELF.SYSTEM.
Returns 0 if a change has been made, None otherwise."""
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if self.name not in hidden_users:
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
                    self.module.fail_json(msg='Cannot add user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del (hidden_users[hidden_users.index(self.name)])
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
"""Check is SELF.NAME is a known user on the system."""
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
return rc == 0
def remove_user(self):
"""Delete SELF.NAME. If SELF.FORCE is true, remove its home directory."""
info = self.user_info()
cmd = self._get_dscl()
cmd += ['-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
        # Make the GECOS (i.e. display name) default to the username
if self.comment is None:
self.comment = self.name
# Make user group default to 'staff'
if self.group is None:
self.group = 'staff'
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid(self.system))
# Homedir is not created by default
if self.create_home:
if self.home is None:
self.home = '/Users/%s' % self.name
if not self.module.check_mode:
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
# dscl sets shell to /usr/bin/false when UserShell is not specified
# so set the shell to /bin/bash when the user is not a system user
if not self.system and self.shell is None:
self.shell = '/bin/bash'
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
return (rc, _out, _err)
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, out, err)
def modify_user(self):
changed = None
out = ''
err = ''
if self.group:
self._make_group_numerical()
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != to_text(self.__dict__[field[0]]):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always' and self.password is not None:
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
changed = rc
if self.groups:
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
class AIX(User):
"""
    This is an AIX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
- parse_shadow_file()
"""
platform = 'AIX'
distribution = None
SHADOWFILE = '/etc/security/passwd'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.password is not None:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
else:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
if rc is not None:
return (rc, out + out2, err + err2)
else:
return (rc2, out + out2, err + err2)
def parse_shadow_file(self):
"""Example AIX shadowfile data:
nobody:
password = *
operator1:
password = {ssha512}06$xxxxxxxxxxxx....
lastupdate = 1549558094
test1:
password = *
lastupdate = 1553695126
"""
b_name = to_bytes(self.name)
b_passwd = b''
b_expires = b''
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'rb') as bf:
b_lines = bf.readlines()
b_passwd_line = b''
b_expires_line = b''
try:
for index, b_line in enumerate(b_lines):
# Get password and lastupdate lines which come after the username
if b_line.startswith(b'%s:' % b_name):
b_passwd_line = b_lines[index + 1]
b_expires_line = b_lines[index + 2]
break
# Sanity check the lines because sometimes both are not present
if b' = ' in b_passwd_line:
b_passwd = b_passwd_line.split(b' = ', 1)[-1].strip()
if b' = ' in b_expires_line:
b_expires = b_expires_line.split(b' = ', 1)[-1].strip()
except IndexError:
self.module.fail_json(msg='Failed to parse shadow file %s' % self.SHADOWFILE)
passwd = to_native(b_passwd)
expires = to_native(b_expires) or -1
return passwd, expires
class HPUX(User):
"""
    This is an HP-UX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'HP-UX'
distribution = None
SHADOWFILE = '/etc/shadow'
def create_user(self):
cmd = ['/usr/sam/lbin/useradd.sam']
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user(self):
cmd = ['/usr/sam/lbin/userdel.sam']
if self.force:
cmd.append('-F')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = ['/usr/sam/lbin/usermod.sam']
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False, names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-F')
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class BusyBox(User):
"""
This is the BusyBox class for use on systems that have adduser, deluser,
and delgroup commands. It overrides the following methods:
- create_user()
- remove_user()
- modify_user()
"""
def create_user(self):
cmd = [self.module.get_bin_path('adduser', True)]
cmd.append('-D')
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg='Group {0} does not exist'.format(self.group))
cmd.append('-G')
cmd.append(self.group)
if self.comment is not None:
cmd.append('-g')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-h')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if not self.create_home:
cmd.append('-H')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.system:
cmd.append('-S')
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
cmd.append(self.name)
rc, out, err = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
if self.password is not None:
cmd = [self.module.get_bin_path('chpasswd', True)]
cmd.append('--encrypted')
data = '{name}:{password}'.format(name=self.name, password=self.password)
rc, out, err = self.execute_command(cmd, data=data)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# Add to additional groups
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
add_cmd_bin = self.module.get_bin_path('adduser', True)
for group in groups:
cmd = [add_cmd_bin, self.name, group]
rc, out, err = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
return rc, out, err
def remove_user(self):
cmd = [
self.module.get_bin_path('deluser', True),
self.name
]
if self.remove:
cmd.append('--remove-home')
return self.execute_command(cmd)
def modify_user(self):
current_groups = self.user_group_membership()
groups = []
rc = None
out = ''
err = ''
info = self.user_info()
add_cmd_bin = self.module.get_bin_path('adduser', True)
remove_cmd_bin = self.module.get_bin_path('delgroup', True)
# Manage group membership
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
for g in groups:
if g in group_diff:
add_cmd = [add_cmd_bin, self.name, g]
rc, out, err = self.execute_command(add_cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
for g in group_diff:
if g not in groups and not self.append:
remove_cmd = [remove_cmd_bin, self.name, g]
rc, out, err = self.execute_command(remove_cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# Manage password
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = [self.module.get_bin_path('chpasswd', True)]
cmd.append('--encrypted')
data = '{name}:{password}'.format(name=self.name, password=self.password)
rc, out, err = self.execute_command(cmd, data=data)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
return rc, out, err
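# BusyBox has no usermod; as modify_user() above shows, membership is
# reconciled one group at a time with the adduser/delgroup applets, e.g.
# (hypothetical user and group names):
#   adduser alice video    # add alice to the 'video' group
#   delgroup alice video   # drop alice from 'video' (skipped when append=True)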
class Alpine(BusyBox):
"""
This is the Alpine User manipulation class. It inherits the BusyBox class
behaviors such as using adduser and deluser commands.
"""
platform = 'Linux'
distribution = 'Alpine'
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='int'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list', elements='str'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='path'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
password_expire_max=dict(type='int', no_log=False),
password_expire_min=dict(type='int', no_log=False),
password_expire_warn=dict(type='int', no_log=False),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create'], no_log=False),
expires=dict(type='float'),
password_lock=dict(type='bool', no_log=False),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
umask=dict(type='str'),
password_expire_account_disable=dict(type='int', no_log=False),
uid_min=dict(type='int'),
uid_max=dict(type='int'),
),
supports_check_mode=True,
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
# Check to see if the provided home path contains parent directories
# that do not exist.
path_needs_parents = False
if user.home and user.create_home:
parent = os.path.dirname(user.home)
if not os.path.isdir(parent):
path_needs_parents = True
(rc, out, err) = user.create_user()
# If the home path had parent directories that needed to be created,
# make sure file permissions are correct in the created home directory.
if path_needs_parents:
info = user.user_info()
if info is not False:
user.chown_homedir(info[2], info[3], user.home)
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists() and user.state == 'present':
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
(rc, out, err) = user.set_password_expire()
if rc is None:
pass # target state reached, nothing to do
else:
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
else:
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| 123,301 | Python | .py | 2,915 | 30.095712 | 159 | 0.543816 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,138 | apt.py | ansible_ansible/lib/ansible/modules/apt.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Flowroute LLC
# Written by Matthew Williams <matthew@flowroute.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: apt
short_description: Manages apt packages
description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
name:
description:
- A list of package names, like V(foo), or package specifier with version, like V(foo=1.0) or V(foo>=1.0).
Name wildcards (fnmatch) like V(apt*) and version wildcards like V(foo=1.0*) are also supported.
- Do not use single or double quotes around the version when referring to the package name with a specific version, such as V(foo=1.0) or V(foo>=1.0).
aliases: [ package, pkg ]
type: list
elements: str
state:
description:
- Indicates the desired package state. V(latest) ensures that the latest version is installed. V(build-dep) ensures the package build dependencies
are installed. V(fixed) attempts to correct a system with broken dependencies in place.
type: str
default: present
choices: [ absent, build-dep, latest, present, fixed ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
- Default is not to update the cache.
aliases: [ update-cache ]
type: bool
update_cache_retries:
description:
- Number of retries if the cache update fails. Also see O(update_cache_retry_max_delay).
type: int
default: 5
version_added: '2.10'
update_cache_retry_max_delay:
description:
- Use an exponential backoff delay for each retry (see O(update_cache_retries)) up to this max delay in seconds.
type: int
default: 12
version_added: '2.10'
cache_valid_time:
description:
- Update the apt cache if it is older than the O(cache_valid_time). This option is set in seconds.
- As of Ansible 2.4, if explicitly set, this sets O(update_cache=yes).
type: int
default: 0
purge:
description:
- Will force purging of configuration files if O(state=absent) or O(autoremove=yes).
type: bool
default: 'no'
default_release:
description:
- Corresponds to the C(-t) option for I(apt) and sets pin priorities.
aliases: [ default-release ]
type: str
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for C(apt). V(true) installs recommended packages. V(false) does not install
recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
aliases: [ install-recommends ]
type: bool
force:
description:
- 'Corresponds to the C(--force-yes) to C(apt-get) and implies O(allow_unauthenticated=yes) and O(allow_downgrade=yes).'
- "This option will disable checking both the packages' signatures and the certificates of the web servers they are downloaded from."
- 'This option *is not* the equivalent of passing the C(-f) flag to C(apt-get) on the command line.'
- '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
Please also see C(man apt-get) for more information.'
type: bool
default: 'no'
clean:
description:
- Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
- Can be run as part of the package installation (clean runs before install) or as a separate step.
type: bool
default: 'no'
version_added: "2.13"
allow_unauthenticated:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
- 'O(allow_unauthenticated) is only supported with O(state): V(install)/V(present).'
aliases: [ allow-unauthenticated ]
type: bool
default: 'no'
version_added: "2.1"
allow_downgrade:
description:
- Corresponds to the C(--allow-downgrades) option for I(apt).
- This option enables the named package and version to replace an already installed higher version of that package.
- Note that setting O(allow_downgrade=true) can make this module behave in a non-idempotent way.
- (The task could end up with a set of packages that does not match the complete list of specified packages to install).
- 'O(allow_downgrade) is only supported by C(apt) and will be ignored if C(aptitude) is detected or specified.'
aliases: [ allow-downgrade, allow_downgrades, allow-downgrades ]
type: bool
default: 'no'
version_added: "2.12"
allow_change_held_packages:
description:
- Allows changing the version of a package which is on the apt hold list.
type: bool
default: 'no'
version_added: '2.13'
upgrade:
description:
- If yes or safe, performs an aptitude safe-upgrade.
- If full, performs an aptitude full-upgrade.
- If dist, performs an apt-get dist-upgrade.
- 'Note: This does not upgrade a specific package, use state=latest for that.'
- 'Note: Since 2.4, apt-get is used as a fall-back if aptitude is not present.'
version_added: "1.1"
choices: [ dist, full, 'no', safe, 'yes' ]
default: 'no'
type: str
dpkg_options:
description:
- Add C(dpkg) options to C(apt) command. Defaults to C(-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold").
- Options should be supplied as a comma-separated list.
default: force-confdef,force-confold
type: str
deb:
description:
- Path to a .deb package on the remote machine.
- If C(://) is in the path, Ansible will attempt to download the deb before installing it. (Version added 2.1)
- Requires the C(xz-utils) package to extract the control file of the deb package to install.
type: path
required: false
version_added: "1.6"
autoremove:
description:
- If V(true), remove unused dependency packages for all module states except V(build-dep). It can also be used as the only option.
- Prior to version 2.4, O(autoclean) was also an alias for O(autoremove); now it is its own separate command.
See documentation for further information.
type: bool
default: 'no'
version_added: "2.1"
autoclean:
description:
- If V(true), cleans the local repository of retrieved package files that can no longer be downloaded.
type: bool
default: 'no'
version_added: "2.4"
policy_rc_d:
description:
- Force the exit code of C(/usr/sbin/policy-rc.d).
- For example, if O(policy_rc_d=101) the installed package will not trigger a service start.
- If C(/usr/sbin/policy-rc.d) already exists, it is backed up and restored after the package installation.
- If V(null), the C(/usr/sbin/policy-rc.d) is not created/changed.
type: int
default: null
version_added: "2.8"
only_upgrade:
description:
- Only upgrade a package if it is already installed.
type: bool
default: 'no'
version_added: "2.1"
fail_on_autoremove:
description:
- 'Corresponds to the C(--no-remove) option for C(apt).'
- 'If V(true), the task will fail rather than remove any packages.'
- 'O(fail_on_autoremove) is supported with every O(state) value except V(absent).'
- 'O(fail_on_autoremove) is only supported by C(apt) and will be ignored if C(aptitude) is detected or specified.'
type: bool
default: 'no'
version_added: "2.11"
force_apt_get:
description:
- Force usage of apt-get instead of aptitude.
type: bool
default: 'no'
version_added: "2.4"
lock_timeout:
description:
- How many seconds this action will wait to acquire a lock on the apt db.
- Sometimes there is a transitory lock and this will retry at least until the timeout is hit.
type: int
default: 60
version_added: "2.12"
requirements:
- python-apt (python 2)
- python3-apt (python 3)
- aptitude (before 2.4)
author: "Matthew Williams (@mgwilliams)"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: debian
notes:
- Three of the upgrade modes (V(full), V(safe) and its alias V(true)) required C(aptitude) up to 2.3; since 2.4, C(apt-get) is used as a fall-back.
- In most cases, packages installed with I(apt) will start newly installed services by default. Most distributions have mechanisms to avoid this.
For example, when installing Postgresql-9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that returns
an exit code of 101 will stop Postgresql 9.5 from starting up after install. Remove the file or its execute permission afterward.
- The C(apt-get) commandline supports implicit regex matches here but we do not, because it lets typos through more easily
(if you typo C(foo) as C(fo), apt-get would install packages that have "fo" in their name, with a warning and a prompt for the user.
Since there are no warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding).
- When used with a C(loop:), each package will be processed individually; it is much more efficient to pass the list directly to the O(name) option.
- When O(default_release) is used, an implicit priority of 990 is used. This is the same behavior as C(apt-get -t).
- When an exact version is specified, an implicit priority of 1001 is used.
- If the interpreter can't import C(python-apt)/C(python3-apt) the module will check for it in system-owned interpreters as well.
If the dependency can't be found, the module will attempt to install it.
If the dependency is found or installed, the module will be respawned under the correct interpreter.
"""
EXAMPLES = """
- name: Install apache httpd (state=present is optional)
ansible.builtin.apt:
name: apache2
state: present
- name: Update repositories cache and install "foo" package
ansible.builtin.apt:
name: foo
update_cache: yes
- name: Remove "foo" package
ansible.builtin.apt:
name: foo
state: absent
- name: Install the package "foo"
ansible.builtin.apt:
name: foo
- name: Install a list of packages
ansible.builtin.apt:
pkg:
- foo
- foo-tools
- name: Install the version '1.00' of package "foo"
ansible.builtin.apt:
name: foo=1.00
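# Name wildcards (fnmatch) are supported, as noted in the documentation above;
# the package pattern here is hypothetical:
- name: Install every available package matching "foo*" (fnmatch wildcard)
  ansible.builtin.apt:
    name: foo*
    state: present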
- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
ansible.builtin.apt:
name: nginx
state: latest
default_release: squeeze-backports
update_cache: yes
- name: Install the version '1.18.0' of package "nginx" and allow potential downgrades
ansible.builtin.apt:
name: nginx=1.18.0
state: present
allow_downgrade: yes
- name: Install zfsutils-linux with ensuring conflicted packages (e.g. zfs-fuse) will not be removed.
ansible.builtin.apt:
name: zfsutils-linux
state: latest
fail_on_autoremove: yes
- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
ansible.builtin.apt:
name: openjdk-6-jdk
state: latest
install_recommends: no
- name: Update all packages to their latest version
ansible.builtin.apt:
name: "*"
state: latest
- name: Upgrade the OS (apt-get dist-upgrade)
ansible.builtin.apt:
upgrade: dist
- name: Run the equivalent of "apt-get update" as a separate step
ansible.builtin.apt:
update_cache: yes
- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
ansible.builtin.apt:
update_cache: yes
cache_valid_time: 3600
- name: Pass options to dpkg on run
ansible.builtin.apt:
upgrade: dist
update_cache: yes
dpkg_options: 'force-confold,force-confdef'
- name: Install a .deb package
ansible.builtin.apt:
deb: /tmp/mypackage.deb
- name: Install the build dependencies for package "foo"
ansible.builtin.apt:
pkg: foo
state: build-dep
- name: Install a .deb package from the internet
ansible.builtin.apt:
deb: https://example.com/python-ppq_0.1-1_all.deb
- name: Remove useless packages from the cache
ansible.builtin.apt:
autoclean: yes
- name: Remove dependencies that are no longer required
ansible.builtin.apt:
autoremove: yes
- name: Remove dependencies that are no longer required and purge their configuration files
ansible.builtin.apt:
autoremove: yes
purge: true
- name: Run the equivalent of "apt-get clean" as a separate step
ansible.builtin.apt:
clean: yes
"""
RETURN = """
cache_updated:
description: whether the cache was updated
returned: success, in some cases
type: bool
sample: True
cache_update_time:
description: time of the last cache update (0 if unknown)
returned: success, in some cases
type: int
sample: 1425828348000
stdout:
description: output from apt
returned: success, when needed
type: str
sample: |-
Reading package lists...
Building dependency tree...
Reading state information...
The following extra packages will be installed:
apache2-bin ...
stderr:
description: error output from apt
returned: success, when needed
type: str
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
""" # NOQA
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import datetime
import fnmatch
import locale as locale_module
import os
import re
import secrets
import shutil
import sys
import tempfile
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.file import S_IRWXU_RXG_RXO
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.six import string_types
from ansible.module_utils.urls import fetch_file
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "\n0 upgraded, 0 newly installed, 0 to remove"
APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed, 0 to remove"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
APT_MARK_INVALID_OP = 'Invalid operation'
APT_MARK_INVALID_OP_DEB6 = 'Usage: apt-mark [options] {markauto|unmarkauto} packages'
CLEAN_OP_CHANGED_STR = dict(
autoremove='The following packages will be REMOVED',
# "Del python3-q 2.4-1 [24 kB]"
autoclean='Del ',
)
HAS_PYTHON_APT = False
try:
import apt
import apt.debfile
import apt_pkg
HAS_PYTHON_APT = True
except ImportError:
apt = apt_pkg = None
class PolicyRcD(object):
"""
This class is a context manager for the /usr/sbin/policy-rc.d file.
It allows the user to prevent dpkg from starting the corresponding service when installing
a package.
https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
"""
def __init__(self, module):
# we need the module for later use (eg. fail_json)
self.m = module
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
# if the /usr/sbin/policy-rc.d already exists
# we will back it up during package installation
# then restore it
if os.path.exists('/usr/sbin/policy-rc.d'):
self.backup_dir = tempfile.mkdtemp(prefix="ansible")
else:
self.backup_dir = None
def __enter__(self):
"""
This method will be called when we enter the context, before we call `apt-get …`
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
# if the /usr/sbin/policy-rc.d already exists we back it up
if self.backup_dir:
try:
shutil.move('/usr/sbin/policy-rc.d', self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move /usr/sbin/policy-rc.d to %s" % self.backup_dir)
# we write /usr/sbin/policy-rc.d so it always exits with code policy_rc_d
try:
with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
os.chmod('/usr/sbin/policy-rc.d', S_IRWXU_RXG_RXO)
except Exception:
self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d")
def __exit__(self, type, value, traceback):
"""
This method will be called when we exit the context, after `apt-get …` is done
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
if self.backup_dir:
# if /usr/sbin/policy-rc.d already exists before the call to __enter__
# we restore it (from the backup done in __enter__)
try:
shutil.move(os.path.join(self.backup_dir, 'policy-rc.d'),
'/usr/sbin/policy-rc.d')
os.rmdir(self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move back %s to /usr/sbin/policy-rc.d"
% os.path.join(self.backup_dir, 'policy-rc.d'))
else:
# if there wasn't a /usr/sbin/policy-rc.d file before the call to __enter__
# we just remove the file
try:
os.remove('/usr/sbin/policy-rc.d')
except Exception:
self.m.fail_json(msg="Fail to remove /usr/sbin/policy-rc.d (after package manipulation)")
def package_split(pkgspec):
parts = re.split(r'(>?=)', pkgspec, maxsplit=1)
if len(parts) > 1:
return parts
return parts[0], None, None
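# Behaviour sketch for package_split() (hypothetical specs):
#   package_split('foo')        -> ('foo', None, None)
#   package_split('foo=1.0')    -> ['foo', '=', '1.0']
#   package_split('foo>=1.0')   -> ['foo', '>=', '1.0']
# Note the asymmetry: the no-version case returns a tuple, the matched cases
# return the raw 3-element list from re.split().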
def package_version_compare(version, other_version):
try:
return apt_pkg.version_compare(version, other_version)
except AttributeError:
return apt_pkg.VersionCompare(version, other_version)
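# apt_pkg.version_compare() follows cmp() semantics: negative when the first
# version sorts before the second, zero when equal, positive otherwise, e.g.
# package_version_compare('1.0~rc1', '1.0') is negative ('~' sorts earliest).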
def package_best_match(pkgname, version_cmp, version, release, cache):
policy = apt_pkg.Policy(cache)
policy.read_pinfile(apt_pkg.config.find_file("Dir::Etc::preferences"))
policy.read_pindir(apt_pkg.config.find_file("Dir::Etc::preferencesparts"))
if release:
# 990 is the priority used in `apt-get -t`
policy.create_pin('Release', pkgname, release, 990)
if version_cmp == "=":
# Installing a specific version from command line overrides all pinning
# We don't mimic this exactly, but instead set a priority which is higher than all APT built-in pin priorities.
policy.create_pin('Version', pkgname, version, 1001)
pkg = cache[pkgname]
pkgver = policy.get_candidate_ver(pkg)
if not pkgver:
return None
if version_cmp == "=" and not fnmatch.fnmatch(pkgver.ver_str, version):
# Even though we put in a pin policy, it can be ignored if there is no
# possible candidate.
return None
return pkgver.ver_str
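# Pin-priority sketch for the constants above: the release pin at 990 mirrors
# `apt-get -t <release>`, and the version pin at 1001 outranks every stock
# priority (anything above 1000 even allows a downgrade), so an explicit
# foo=1.0 request wins over existing pins.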
def package_status(m, pkgname, version_cmp, version, default_release, cache, state):
"""
:return: A tuple of (installed, installed_version, version_installable, has_files). *installed* indicates whether
the package (regardless of version) is installed. *installed_version* indicates whether the installed package
matches the provided version criteria. *version_installable* provides the latest matching version that can be
installed. In the case of virtual packages where we can't determine an applicable match, True is returned.
*has_files* indicates whether the package has files on the filesystem (even if not installed, meaning a purge is
required).
"""
try:
# get the package from the cache, as well as the
# low-level apt_pkg.Package object which contains
# state fields not directly accessible from the
# higher-level apt.package.Package object.
pkg = cache[pkgname]
ll_pkg = cache._cache[pkgname] # the low-level package object
except KeyError:
if state == 'install':
try:
provided_packages = cache.get_providing_packages(pkgname)
if provided_packages:
# When this is a virtual package satisfied by only
# one installed package, return the status of the target
# package to avoid requesting re-install
if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
package = provided_packages[0]
installed, installed_version, version_installable, has_files = \
package_status(m, package.name, version_cmp, version, default_release, cache, state='install')
if installed:
return installed, installed_version, version_installable, has_files
# Otherwise return nothing so apt will sort out
# what package to satisfy this with
return False, False, True, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
except AttributeError:
# python-apt version too old to detect virtual packages
# mark as not installed and let apt-get install deal with it
return False, False, True, False
else:
return False, False, None, False
try:
has_files = len(pkg.installed_files) > 0
except UnicodeDecodeError:
has_files = True
except AttributeError:
has_files = False # older python-apt cannot be used to determine non-purged
try:
package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
except AttributeError: # python-apt 0.7.X has very weak low-level object
try:
# might not be necessary as python-apt post-0.7.X should have current_state property
package_is_installed = pkg.is_installed
except AttributeError:
# assume older version of python-apt is installed
package_is_installed = pkg.isInstalled
version_best = package_best_match(pkgname, version_cmp, version, default_release, cache._cache)
version_is_installed = False
version_installable = None
if package_is_installed:
try:
installed_version = pkg.installed.version
except AttributeError:
installed_version = pkg.installedVersion
if version_cmp == "=":
# check if the version is matched as well
version_is_installed = fnmatch.fnmatch(installed_version, version)
if version_best and installed_version != version_best and fnmatch.fnmatch(version_best, version):
version_installable = version_best
elif version_cmp == ">=":
version_is_installed = apt_pkg.version_compare(installed_version, version) >= 0
if version_best and installed_version != version_best and apt_pkg.version_compare(version_best, version) >= 0:
version_installable = version_best
else:
version_is_installed = True
if version_best and installed_version != version_best:
version_installable = version_best
else:
version_installable = version_best
return package_is_installed, version_is_installed, version_installable, has_files
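# Illustrative return values (hypothetical package state and versions):
#   not installed, candidate 1.2-1 available -> (False, False, '1.2-1', False)
#   1.2-1 installed and already the best     -> (True, True, None, True)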
def expand_dpkg_options(dpkg_options_compressed):
options_list = dpkg_options_compressed.split(',')
dpkg_options = ""
for dpkg_option in options_list:
dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
% (dpkg_options, dpkg_option)
return dpkg_options.strip()
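# Example of the expansion above:
#   expand_dpkg_options('force-confdef,force-confold')
#   -> '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'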
def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
# Note: apt-get does implicit regex matching when an exact package name
# match is not found. Something like this:
# matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
# (Should also deal with the ':' for multiarch like the fnmatch code below)
#
# We have decided not to do similar implicit regex matching but might take
# a PR to add some sort of explicit regex matching:
# https://github.com/ansible/ansible-modules-core/issues/1258
new_pkgspec = []
if pkgspec:
for pkgspec_pattern in pkgspec:
if not isinstance(pkgspec_pattern, string_types):
m.fail_json(msg="Invalid type for package name, expected string but got %s" % type(pkgspec_pattern))
pkgname_pattern, version_cmp, version = package_split(pkgspec_pattern)
# note that none of these chars is allowed in a (debian) pkgname
if frozenset('*?[]!').intersection(pkgname_pattern):
# handle multiarch pkgnames, the idea is that "apt*" should
# only select native packages. But "apt*:i386" should still work
if ":" not in pkgname_pattern:
# Filter the multiarch packages from the cache only once
try:
pkg_name_cache = _non_multiarch # pylint: disable=used-before-assignment
except NameError:
pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841
else:
# Create a cache of pkg_names including multiarch only once
try:
pkg_name_cache = _all_pkg_names # pylint: disable=used-before-assignment
except NameError:
pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841
matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
if not matches:
m.fail_json(msg="No package(s) matching '%s' available" % to_text(pkgname_pattern))
else:
new_pkgspec.extend(matches)
else:
# No wildcards in name
new_pkgspec.append(pkgspec_pattern)
return new_pkgspec
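# e.g. a pkgspec of ['apt*'] might expand to ['apt', 'apt-utils', ...] from
# the native-only name cache, while 'apt*:i386' is matched against the full
# multiarch cache (names here are illustrative).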
def parse_diff(output):
diff = to_native(output).splitlines()
try:
# check for start marker from aptitude
diff_start = diff.index('Resolving dependencies...')
except ValueError:
try:
# check for start marker from apt-get
diff_start = diff.index('Reading state information...')
except ValueError:
# show everything
diff_start = -1
try:
# check for end marker line from both apt-get and aptitude
diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item))
except StopIteration:
diff_end = len(diff)
diff_start += 1
diff_end += 1
return {'prepared': '\n'.join(diff[diff_start:diff_end])}
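# Sketch of the slicing above for typical apt-get output:
#   Reading package lists...
#   Building dependency tree...
#   Reading state information...
#   The following NEW packages will be installed:
#     foo
#   1 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.
# parse_diff() returns everything after the 'Reading state information...'
# marker, up to and including the '1 upgraded, ...' summary line, under the
# 'prepared' diff key.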
def mark_installed_manually(m, packages):
if not packages:
return
apt_mark_cmd_path = m.get_bin_path("apt-mark")
# https://github.com/ansible/ansible/issues/40531
if apt_mark_cmd_path is None:
m.warn("Could not find apt-mark binary, not marking package(s) as manually installed.")
return
cmd = "%s manual %s" % (apt_mark_cmd_path, ' '.join(packages))
rc, out, err = m.run_command(cmd)
if APT_MARK_INVALID_OP in err or APT_MARK_INVALID_OP_DEB6 in err:
cmd = "%s unmarkauto %s" % (apt_mark_cmd_path, ' '.join(packages))
rc, out, err = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, fixed=False, autoremove=False, fail_on_autoremove=False, only_upgrade=False,
allow_unauthenticated=False, allow_downgrade=False, allow_change_held_packages=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
package_names = []
for package in pkgspec:
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
name, version_cmp, version = package_split(package)
package_names.append(name)
installed, installed_version, version_installable, has_files = package_status(m, name, version_cmp, version, default_release, cache, state='install')
if not installed and only_upgrade:
# only_upgrade upgrades packages that are already installed
# since this package is not installed, skip it
continue
if not installed_version and not version_installable:
status = False
data = dict(msg="no available installation candidate for %s" % package)
return (status, data)
if version_installable and ((not installed and not only_upgrade) or upgrade or not installed_version):
if version_installable is not True:
pkg_list.append("'%s=%s'" % (name, version_installable))
elif version:
pkg_list.append("'%s=%s'" % (name, version))
else:
pkg_list.append("'%s'" % name)
elif installed_version and version_installable and version_cmp == "=":
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# This is legacy behavior, and isn't documented (in fact it does
# things documentations says it shouldn't). It should not be relied
# upon.
pkg_list.append("'%s=%s'" % (name, version))
packages = ' '.join(pkg_list)
if packages:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if fail_on_autoremove:
fail_on_autoremove = '--no-remove'
else:
fail_on_autoremove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if fixed:
fixed = '--fix-broken'
else:
fixed = ''
if build_dep:
cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, fail_on_autoremove, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
(APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, fail_on_autoremove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
if allow_downgrade:
cmd += " --allow-downgrades"
if allow_change_held_packages:
cmd += " --allow-change-held-packages"
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
status = True
changed = True
if build_dep:
changed = APT_GET_ZERO not in out
data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
if rc:
status = False
data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
else:
status = True
data = dict(changed=False)
if not build_dep and not m.check_mode:
mark_installed_manually(m, package_names)
return (status, data)
def get_field_of_deb(m, deb_file, field="Version"):
cmd_dpkg = m.get_bin_path("dpkg", True)
cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
rc, stdout, stderr = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
return to_native(stdout).strip('\n')
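# Under the hood this shells out to dpkg, e.g. (path hypothetical):
#   dpkg --field /tmp/mypackage.deb Version   -> '1.18.0'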
def install_deb(
m, debs, cache, force, fail_on_autoremove, install_recommends,
allow_unauthenticated,
allow_downgrade,
allow_change_held_packages,
dpkg_options,
):
changed = False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file, cache=apt.Cache())
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
if hasattr(apt_pkg, 'get_architectures') and len(apt_pkg.get_architectures()) > 1:
pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
pkg_key = "%s:%s" % (pkg_name, pkg_arch)
else:
pkg_key = pkg_name
try:
installed_pkg = apt.Cache()[pkg_key]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check():
if force or ("later version" in pkg._failure_string and allow_downgrade):
pass
else:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
# Install 'Recommends' of this deb file
if install_recommends:
pkg_recommends = get_field_of_deb(m, deb_file, "Recommends")
deps_to_install.extend([pkg_name.strip() for pkg_name in pkg_recommends.split()])
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
fail_on_autoremove=fail_on_autoremove,
allow_unauthenticated=allow_unauthenticated,
allow_downgrade=allow_downgrade,
allow_change_held_packages=allow_change_held_packages,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if pkgs_to_install:
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
def remove(m, pkgspec, cache, purge=False, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False,
allow_change_held_packages=False):
pkg_list = []
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version_cmp, version = package_split(package)
installed, installed_version, upgradable, has_files = package_status(m, name, version_cmp, version, None, cache, state='remove')
if installed_version or (has_files and purge):
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if not packages:
m.exit_json(changed=False)
else:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if purge:
purge = '--purge'
else:
purge = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if allow_change_held_packages:
allow_change_held_packages = '--allow-change-held-packages'
else:
allow_change_held_packages = ''
cmd = "%s -q -y %s %s %s %s %s %s remove %s" % (
APT_GET_CMD,
dpkg_options,
purge,
force_yes,
autoremove,
check_arg,
allow_change_held_packages,
packages
)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc)
m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
def cleanup(m, purge=False, force=False, operation=None,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
if operation not in frozenset(['autoremove', 'autoclean']):
raise AssertionError('Expected "autoremove" or "autoclean" cleanup operation, got %s' % operation)
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if purge:
purge = '--purge'
else:
purge = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
cmd = "%s -y %s %s %s %s %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, operation, check_arg)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'apt-get %s' failed: %s" % (operation, err), stdout=out, stderr=err, rc=rc)
changed = CLEAN_OP_CHANGED_STR[operation] in out
m.exit_json(changed=changed, stdout=out, stderr=err, diff=diff)
def aptclean(m):
clean_rc, clean_out, clean_err = m.run_command(['apt-get', 'clean'])
clean_diff = parse_diff(clean_out) if m._diff else {}
if clean_rc:
m.fail_json(msg="apt-get clean failed", stdout=clean_out, rc=clean_rc)
if clean_err:
m.fail_json(msg="apt-get clean failed: %s" % clean_err, stdout=clean_out, rc=clean_rc)
return (clean_out, clean_err, clean_diff)
def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, fail_on_autoremove=False,
allow_unauthenticated=False,
allow_downgrade=False,
):
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist" or (mode == "full" and use_apt_get):
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade %s" % (autoremove)
elif mode == "full" and not use_apt_get:
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
if use_apt_get:
apt_cmd = APT_GET_CMD
upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
if fail_on_autoremove:
if apt_cmd == APT_GET_CMD:
fail_on_autoremove = '--no-remove'
else:
m.warn("APTITUDE does not support '--no-remove', ignoring the 'fail_on_autoremove' parameter.")
fail_on_autoremove = ''
else:
fail_on_autoremove = ''
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
if allow_downgrade:
if apt_cmd == APT_GET_CMD:
allow_downgrade = '--allow-downgrades'
else:
m.warn("APTITUDE does not support '--allow-downgrades', ignoring the 'allow_downgrade' parameter.")
allow_downgrade = ''
else:
allow_downgrade = ''
if apt_cmd is None:
if use_apt_get:
apt_cmd = APT_GET_CMD
else:
m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s %s %s %s' % (
apt_cmd_path,
dpkg_options,
force_yes,
fail_on_autoremove,
allow_unauthenticated,
allow_downgrade,
check_arg,
upgrade_command,
)
if default_release:
cmd += " -t '%s'" % (default_release,)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
def get_cache_mtime():
"""Return mtime of a valid apt cache file.
Stat the apt cache file; if no cache file is found, return 0.
:returns: ``int``
"""
cache_time = 0
if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH):
cache_time = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
elif os.path.exists(APT_LISTS_PATH):
cache_time = os.stat(APT_LISTS_PATH).st_mtime
return cache_time
def get_updated_cache_time():
"""Return the mtime time stamp and the updated cache time.
Always retrieve the mtime of the apt cache or set the `cache_mtime`
variable to 0
:returns: ``tuple``
"""
cache_mtime = get_cache_mtime()
mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
return mtimestamp, updated_cache_time
# https://github.com/ansible/ansible-modules-core/issues/2951
def get_cache(module):
"""Attempt to get the cache object and update till it works"""
cache = None
try:
cache = apt.Cache()
except SystemError as e:
if '/var/lib/apt/lists/' in to_native(e).lower():
# update cache until files are fixed or retries exceeded
retries = 0
while retries < 2:
(rc, so, se) = module.run_command(['apt-get', 'update', '-q'])
retries += 1
if rc == 0:
break
if rc != 0:
module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (to_native(e), so + se), rc=rc)
# try again
cache = apt.Cache()
else:
module.fail_json(msg=to_native(e))
return cache
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes'], default='no'),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
fail_on_autoremove=dict(type='bool', default=False),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
clean=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
allow_downgrade=dict(type='bool', default=False, aliases=['allow-downgrade', 'allow_downgrades', 'allow-downgrades']),
allow_change_held_packages=dict(type='bool', default=False),
lock_timeout=dict(type='int', default=60),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
# We screenscrape apt-get and aptitude output for information so we need
# to make sure we use the best parsable locale when running commands
# also set apt specific vars for desired behaviour
locale = get_best_parsable_locale(module)
locale_module.setlocale(locale_module.LC_ALL, locale)
# APT related constants
APT_ENV_VARS = dict(
DEBIAN_FRONTEND='noninteractive',
DEBIAN_PRIORITY='critical',
LANG=locale,
LC_ALL=locale,
LC_MESSAGES=locale,
LC_CTYPE=locale,
LANGUAGE=locale,
)
module.run_command_environ_update = APT_ENV_VARS
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
install_recommends = p['install_recommends']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
if not HAS_PYTHON_APT:
# This interpreter can't see the apt Python library- we'll do the following to try and fix that:
# 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
# 2) finding none, try to install a matching python-apt package for the current interpreter version;
# we limit to the current interpreter version to try and avoid installing a whole other Python just
# for apt support
# 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
# the current interpreter again, but we'll let it respawn anyway for simplicity)
# 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
# made any more complex than it already is to try and cover more, eg, custom interpreters taking over
# system locations)
apt_pkg_name = 'python3-apt'
if has_respawned():
# this shouldn't be possible; short-circuit early if it happens...
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
interpreters = ['/usr/bin/python3', '/usr/bin/python']
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
# don't make changes if we're in check_mode
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % apt_pkg_name)
# We skip the cache update when auto-installing the dependency if the
# user explicitly declared it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
module.run_command([APT_GET_CMD, 'update'], check_rc=True)
# try to install the apt Python binding
apt_pkg_cmd = [APT_GET_CMD, 'install', apt_pkg_name, '-y', '-q', dpkg_options]
if install_recommends is False:
apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=no"])
elif install_recommends is True:
apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=yes"])
# install_recommends is None uses the OS default
module.run_command(apt_pkg_cmd, check_rc=True)
# try again to find the bindings in common places
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
# NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
else:
# we've done all we can do; just tell the user it's busted and get out
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
if p['clean'] is True:
aptclean_stdout, aptclean_stderr, aptclean_diff = aptclean(module)
# If there is nothing else to do, exit. The clean branch always
# reports a change.
if not p['package'] and p['upgrade'] == 'no' and not p['deb']:
module.exit_json(
changed=True,
msg=aptclean_stdout,
stdout=aptclean_stdout,
stderr=aptclean_stderr,
diff=aptclean_diff
)
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
allow_unauthenticated = p['allow_unauthenticated']
allow_downgrade = p['allow_downgrade']
allow_change_held_packages = p['allow_change_held_packages']
autoremove = p['autoremove']
fail_on_autoremove = p['fail_on_autoremove']
autoclean = p['autoclean']
# max times we'll retry
deadline = time.time() + p['lock_timeout']
# keep running on lock issues unless timeout or resolution is hit.
while True:
# Get the cache object; get_cache() retries 'apt-get update' to repair corrupt lists
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# cache_valid_time defaults to 0, which forces a cache update whenever
# `update_cache` is set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = secrets.randbelow(1000) / 1000.0
for retry in range(update_cache_retries):
try:
if not module.check_mode:
cache.update()
break
except apt.cache.FetchFailedException as fetch_failed_exc:
err = fetch_failed_exc
module.warn(
f"Failed to update cache after {retry + 1} retries due "
f"to {to_native(fetch_failed_exc)}, retrying"
)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to refresh the cache again")
else:
msg = (
f"Failed to update apt cache after {update_cache_retries} retries: "
f"{err if err else 'unknown reason'}"
)
module.fail_json(msg=msg)
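# Illustrative sketch (comment only, not executed): with update_cache_retries=4 and
# update_cache_retry_max_delay=12, the successive sleeps above are roughly
# 1+r, 2+r, 4+r, 8+r seconds, where r is the jitter in [0, 1) computed earlier;
# any delay that exceeds the maximum is clamped to update_cache_retry_max_delay + r.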
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if module.check_mode or updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(
module,
p['upgrade'],
force_yes,
p['default_release'],
use_apt_get,
dpkg_options,
autoremove,
fail_on_autoremove,
allow_unauthenticated,
allow_downgrade
)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
allow_change_held_packages=allow_change_held_packages,
allow_downgrade=allow_downgrade,
force=force_yes, fail_on_autoremove=fail_on_autoremove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(
module,
'yes',
force_yes,
p['default_release'],
use_apt_get,
dpkg_options,
autoremove,
fail_on_autoremove,
allow_unauthenticated,
allow_downgrade
)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
fail_on_autoremove=fail_on_autoremove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated,
allow_downgrade=allow_downgrade,
allow_change_held_packages=allow_change_held_packages,
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store the time of the last cache update
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(
module,
packages,
cache,
p['purge'],
force=force_yes,
dpkg_options=dpkg_options,
autoremove=autoremove,
allow_change_held_packages=allow_change_held_packages
)
except apt.cache.LockFailedException as lockFailedException:
if time.time() < deadline:
continue
module.fail_json(msg="Failed to lock apt for exclusive operation: %s" % lockFailedException)
except apt.cache.FetchFailedException as fetchFailedException:
module.fail_json(msg="Could not fetch updated apt files: %s" % fetchFailedException)
# reached this point without exiting or raising; this should not happen
module.fail_json(msg='Unexpected code path taken, we really should have exited before, this is a bug')
if __name__ == "__main__":
main()
| 61,956 | Python | .py | 1,345 | 36.166543 | 166 | 0.615074 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,139 | include_vars.py | ansible_ansible/lib/ansible/modules/include_vars.py |
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
author: Allen Sanabria (@linuxdynasty)
module: include_vars
short_description: Load variables from files, dynamically within a task
description:
- Loads YAML/JSON variables dynamically from a file or directory, recursively, during task runtime.
- If loading a directory, the files are sorted alphabetically before being loaded.
- This module is also supported for Windows targets.
- To assign included variables to a different host than C(inventory_hostname),
use C(delegate_to) and set C(delegate_facts=yes).
version_added: "1.4"
options:
file:
description:
- The file name from which variables should be loaded.
- If the path is relative, it will look for the file in C(vars/) subdirectory of a role or relative to playbook.
type: path
version_added: "2.2"
dir:
description:
- The directory name from which the variables should be loaded.
- If the path is relative and the task is inside a role, it will look inside the role's C(vars/) subdirectory.
- If the path is relative and not inside a role, it will be parsed relative to the playbook.
type: path
version_added: "2.2"
name:
description:
- The name of a variable into which to assign the included vars.
- If omitted (V(null)), they will be made top-level vars.
type: str
version_added: "2.2"
depth:
description:
- When using O(dir), this module will, by default, recursively go through each subdirectory and load up the
variables. By explicitly setting the depth, this module will recurse no deeper than the given depth.
type: int
default: 0
version_added: "2.2"
files_matching:
description:
- Limit the files that are loaded within any directory to this regular expression.
type: str
version_added: "2.2"
ignore_files:
description:
- List of file names to ignore.
type: list
elements: str
version_added: "2.2"
extensions:
description:
- List of file extensions to read when using O(dir).
type: list
elements: str
default: [ json, yaml, yml ]
version_added: "2.3"
ignore_unknown_extensions:
description:
- Ignore unknown file extensions within the directory.
- This allows users to specify a directory containing vars files that are intermingled with files of other extension types
(e.g. a directory with a README in it and vars files).
type: bool
default: no
version_added: "2.7"
hash_behaviour:
description:
- If set to V(merge), merges existing hash variables instead of overwriting them.
- If omitted (V(null)), the behavior falls back to the global C(hash_behaviour) configuration.
- This option is self-contained and does not apply to individual files in O(dir). You can use a loop to apply O(hash_behaviour) per file.
default: null
type: str
choices: ["replace", "merge"]
version_added: "2.12"
free-form:
description:
- This module allows you to specify the O(file) option directly without any other options.
- There is no O(ignore:free-form) option, this is just an indicator, see example below.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
attributes:
action:
details: While the action plugin does some of the work, it relies on the core engine to actually create the variables; that part cannot be overridden
support: partial
bypass_host_loop:
support: none
bypass_task_loop:
support: none
check_mode:
support: full
delegation:
details:
- while variable assignment can be delegated to a different host, the execution context is always the current inventory_hostname
- connection variables, if set at all, would reflect the host it would target, even if we are not connecting at all in this case
support: partial
diff_mode:
support: none
core:
details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
support: partial
seealso:
- module: ansible.builtin.set_fact
- ref: playbooks_delegation
description: More information related to task delegation.
"""
EXAMPLES = r"""
- name: Include vars of stuff.yaml into the 'stuff' variable (2.2).
ansible.builtin.include_vars:
file: stuff.yaml
name: stuff
- name: Conditionally decide to load in variables into 'plans' when x is 0, otherwise do not. (2.2)
ansible.builtin.include_vars:
file: contingency_plan.yaml
name: plans
when: x == 0
- name: Load a variable file based on the OS type, or a default if not found. Using free-form to specify the file.
ansible.builtin.include_vars: "{{ lookup('ansible.builtin.first_found', params) }}"
vars:
params:
files:
- '{{ansible_distribution}}.yaml'
- '{{ansible_os_family}}.yaml'
- default.yaml
paths:
- 'vars'
- name: Bare include (free-form)
ansible.builtin.include_vars: myvars.yaml
- name: Include all .json and .jsn files in vars/all and all nested directories (2.3)
ansible.builtin.include_vars:
dir: vars/all
extensions:
- 'json'
- 'jsn'
- name: Include all default extension files in vars/all and all nested directories and save the output in test. (2.2)
ansible.builtin.include_vars:
dir: vars/all
name: test
- name: Include default extension files in vars/services (2.2)
ansible.builtin.include_vars:
dir: vars/services
depth: 1
- name: Include only files matching bastion.yaml (2.2)
ansible.builtin.include_vars:
dir: vars
files_matching: bastion.yaml
- name: Include all .yaml files except bastion.yaml (2.3)
ansible.builtin.include_vars:
dir: vars
ignore_files:
- 'bastion.yaml'
extensions:
- 'yaml'
- name: Ignore warnings raised for files with unknown extensions while loading (2.7)
ansible.builtin.include_vars:
dir: vars
ignore_unknown_extensions: True
extensions:
- ''
- 'yaml'
- 'yml'
- 'json'
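# Illustrative example (not part of the original module docs); the file and variable names are hypothetical
- name: Merge vars from extra_config.yaml into the existing 'config' variable (2.12)
  ansible.builtin.include_vars:
    file: extra_config.yaml
    name: config
    hash_behaviour: merge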
"""
RETURN = r"""
ansible_facts:
description: Variables that were included and their values
returned: success
type: dict
sample: {'variable': 'value'}
ansible_included_var_files:
description: A list of files that were successfully included
returned: success
type: list
sample: [ /path/to/file.json, /path/to/file.yaml ]
version_added: '2.4'
"""
| 6,722 | Python | .py | 181 | 32.430939 | 159 | 0.707018 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,140 | cron.py | ansible_ansible/lib/ansible/modules/cron.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: cron
short_description: Manage cron.d and crontab entries
description:
- Use this module to manage crontab and environment variable entries. This module allows
you to create, update, or delete environment variables and named crontab entries.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the O(name) passed to the module,
which is used by future ansible/module calls to find/check the state. The O(name)
parameter should be unique, and changing the O(name) value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
needs to find/check the state, it uses the O(name) parameter to find the environment
variable definition line.
- When using symbols such as C(%), they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if O(env) is set, the name of the environment variable.
- This parameter is always required as of ansible-core 2.12.
type: str
required: yes
user:
description:
- The specific user whose crontab should be modified.
- When unset, this parameter defaults to the current user.
type: str
job:
description:
- The command to execute or, if O(env) is set, the value of the environment variable.
- The command should not contain line breaks.
- Required if O(state=present).
type: str
aliases: [ value ]
state:
description:
- Whether to ensure the job or environment variable is present or absent.
type: str
choices: [ absent, present ]
default: present
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
The assumption is that this file is exclusively managed by the module,
do not use if the file contains multiple entries, NEVER use for /etc/crontab.
- If this is a relative path, it is interpreted with respect to C(/etc/cron.d).
- Many Linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- Using this parameter requires you to specify the O(user) as well, unless O(state=absent).
- Either this parameter or O(name) is required.
type: path
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the RV(ignore:backup_file) variable by this module.
type: bool
default: no
minute:
description:
- Minute when the job should run (V(0-59), V(*), V(*/2), and so on).
type: str
default: "*"
hour:
description:
- Hour when the job should run (V(0-23), V(*), V(*/2), and so on).
type: str
default: "*"
day:
description:
- Day of the month the job should run (V(1-31), V(*), V(*/2), and so on).
type: str
default: "*"
aliases: [ dom ]
month:
description:
- Month of the year the job should run (V(1-12), V(*), V(*/2), and so on).
type: str
default: "*"
weekday:
description:
- Day of the week that the job should run (V(0-6) for Sunday-Saturday, V(*), and so on).
type: str
default: "*"
aliases: [ dow ]
special_time:
description:
- Special time specification nickname.
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
disabled:
description:
- If the job should be disabled (commented out) in the crontab.
- Only has effect if O(state=present).
type: bool
default: no
version_added: "2.0"
env:
description:
- If set, manages a crontab's environment variable.
- New variables are added on top of crontab.
- O(name) and O(value) parameters are the name and the value of the environment variable.
type: bool
default: false
version_added: "2.1"
insertafter:
description:
- Used with O(state=present) and O(env).
- If specified, the environment variable will be inserted after the declaration of the specified environment variable.
type: str
version_added: "2.1"
insertbefore:
description:
- Used with O(state=present) and O(env).
- If specified, the environment variable will be inserted before the declaration of the specified environment variable.
type: str
version_added: "2.1"
requirements:
- cron (any 'vixie cron' conformant variant, like cronie)
notes:
- If you are experiencing permissions issues with cron and MacOS,
you should see the official MacOS documentation for further information.
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
- Patrick Callahan (@dirtyharrycallahan)
- Evan Kaufman (@EvanK)
- Luca Berruti (@lberruti)
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
support: full
platforms: posix
"""
EXAMPLES = r"""
- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * * ls -alh > /dev/null"
ansible.builtin.cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
ansible.builtin.cron:
name: "an old job"
state: absent
- name: Creates an entry like "@reboot /some/job.sh"
ansible.builtin.cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
- name: Creates an entry like "PATH=/opt/bin" on top of crontab
ansible.builtin.cron:
name: PATH
env: yes
job: /opt/bin
- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
ansible.builtin.cron:
name: APP_HOME
env: yes
job: /srv/app
insertafter: PATH
- name: Creates a cron file under /etc/cron.d
ansible.builtin.cron:
name: yum autoupdate
weekday: "2"
minute: "0"
hour: "12"
user: root
job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
- name: Removes a cron file from under /etc/cron.d
ansible.builtin.cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
- name: Removes "APP_HOME" environment variable from crontab
ansible.builtin.cron:
name: APP_HOME
env: yes
state: absent
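# Illustrative example (not part of the original module docs); the variable name and value are hypothetical
- name: Creates an entry like "TZ=UTC" immediately before the PATH declaration
  ansible.builtin.cron:
    name: TZ
    env: yes
    job: UTC
    insertbefore: PATH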
"""
RETURN = r"""#"""
import os
import platform
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.file import S_IRWU_RWG_RWO
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.six.moves import shlex_quote
class CronTabError(Exception):
pass
class CronTab(object):
"""
CronTab object to write time based crontab file
user - the user of the crontab (defaults to current user)
cron_file - a cron file under /etc/cron.d, or an absolute path
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
self.n_existing = ''
self.cron_cmd = self.module.get_bin_path('crontab', required=True)
if cron_file:
if os.path.isabs(cron_file):
self.cron_file = cron_file
self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
with open(self.b_cron_file, 'rb') as f:
self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
self.lines = self.n_existing.splitlines()
except IOError:
# cron file does not exist
return
except Exception:
raise CronTabError("Unexpected error: %s" % sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
self.n_existing = out
lines = out.splitlines()
count = 0
for l in lines:
if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
not re.match(r'# \(/tmp/.*installed on.*\)', l) and
not re.match(r'# \(.*version.*\)', l)):
self.lines.append(l)
else:
pattern = re.escape(l) + '[\r\n]?'
self.n_existing = re.sub(pattern, '', self.n_existing, 1)
count += 1
def is_empty(self):
if len(self.lines) == 0:
return True
else:
for line in self.lines:
if line.strip():
return False
return True
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'wb')
elif self.cron_file:
fileh = open(self.b_cron_file, 'wb')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
os.chmod(path, S_IRWU_RWG_RWO)
fileh = os.fdopen(filed, 'wb')
fileh.write(to_bytes(self.render()))
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=f"Failed to install new cronfile: {path}", stderr=err, stdout=out, rc=rc)
# set SELinux permissions
if self.module.selinux_enabled() and self.cron_file:
self.module.set_default_selinux_context(self.cron_file, False)
def do_comment(self, name):
return "%s%s" % (self.ansible, name)
def add_job(self, name, job):
# Add the comment
self.lines.append(self.do_comment(name))
# Add the job
self.lines.append("%s" % (job))
def update_job(self, name, job):
return self._update_job(name, job, self.do_add_job)
def do_add_job(self, lines, comment, job):
lines.append(comment)
lines.append("%s" % (job))
def remove_job(self, name):
return self._update_job(name, "", self.do_remove_job)
def do_remove_job(self, lines, comment, job):
return None
def add_env(self, decl, insertafter=None, insertbefore=None):
if not (insertafter or insertbefore):
self.lines.insert(0, decl)
return
if insertafter:
other_name = insertafter
elif insertbefore:
other_name = insertbefore
other_decl = self.find_env(other_name)
if len(other_decl) > 0:
if insertafter:
index = other_decl[0] + 1
elif insertbefore:
index = other_decl[0]
self.lines.insert(index, decl)
return
self.module.fail_json(msg="Variable named '%s' not found." % other_name)
def update_env(self, name, decl):
return self._update_env(name, decl, self.do_add_env)
def do_add_env(self, lines, decl):
lines.append(decl)
def remove_env(self, name):
return self._update_env(name, '', self.do_remove_env)
def do_remove_env(self, lines, decl):
return None
def remove_job_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError:
# cron file does not exist
return False
except Exception:
raise CronTabError("Unexpected error: %s" % sys.exc_info()[0])
def find_job(self, name, job=None):
# attempt to find job by 'Ansible:' header comment
comment = None
for l in self.lines:
if comment is not None:
if comment == name:
return [comment, l]
else:
comment = None
elif re.match(r'%s' % self.ansible, l):
comment = re.sub(r'%s' % self.ansible, '', l)
# failing that, attempt to find job by exact match
if job:
for i, l in enumerate(self.lines):
if l == job:
# if no leading ansible header, insert one
if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
self.lines.insert(i, self.do_comment(name))
return [self.lines[i], l, True]
# if a leading blank ansible header AND job has a name, update header
elif name and self.lines[i - 1] == self.do_comment(None):
self.lines[i - 1] = self.do_comment(name)
return [self.lines[i - 1], l, True]
return []
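# Comment-only summary of the return shapes above: a two-element [comment, line] list
# when matched via the "#Ansible: <name>" header, a three-element list ending in True
# when matched by the job text itself, and [] when no job is found.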
def find_env(self, name):
for index, l in enumerate(self.lines):
if re.match(r'^%s=' % name, l):
return [index, l]
return []
def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
# normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
job = job.strip('\r\n')
if disabled:
disable_prefix = '#'
else:
disable_prefix = ''
if special:
if self.cron_file:
return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
else:
return "%s@%s %s" % (disable_prefix, special, job)
else:
if self.cron_file:
return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
else:
return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)
def get_jobnames(self):
jobnames = []
for l in self.lines:
if re.match(r'%s' % self.ansible, l):
jobnames.append(re.sub(r'%s' % self.ansible, '', l))
return jobnames
def get_envnames(self):
envnames = []
for l in self.lines:
if re.match(r'^\S+=', l):
envnames.append(l.split('=')[0])
return envnames
def _update_job(self, name, job, addlinesfunction):
ansiblename = self.do_comment(name)
newlines = []
comment = None
for l in self.lines:
if comment is not None:
addlinesfunction(newlines, comment, job)
comment = None
elif l == ansiblename:
comment = l
else:
newlines.append(l)
self.lines = newlines
if len(newlines) == 0:
return True
else:
return False # TODO add some more error testing
def _update_env(self, name, decl, addenvfunction):
newlines = []
for l in self.lines:
if re.match(r'^%s=' % name, l):
addenvfunction(newlines, decl)
else:
newlines.append(l)
self.lines = newlines
def render(self):
"""
Render this crontab as it would be in the crontab.
"""
crons = []
for cron in self.lines:
crons.append(cron)
result = '\n'.join(crons)
if result:
result = result.rstrip('\r\n') + '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
elif platform.system() == 'AIX':
return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (
shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
def main():
# The following example playbooks:
#
# - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
#
# - name: do the job
# cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
#
# - name: no job
# cron: name="an old job" state=absent
#
# - name: sets env
# cron: name="PATH" env=yes value="/bin:/usr/bin"
#
# Would produce:
# PATH=/bin:/usr/bin
# #Ansible: check dirs
# * 5,2 * * * ls -alh > /dev/null
# #Ansible: do the job
# * 5,2 * * * /some/dir/job.sh
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
user=dict(type='str'),
job=dict(type='str', aliases=['value']),
cron_file=dict(type='path'),
state=dict(type='str', default='present', choices=['present', 'absent']),
backup=dict(type='bool', default=False),
minute=dict(type='str', default='*'),
hour=dict(type='str', default='*'),
day=dict(type='str', default='*', aliases=['dom']),
month=dict(type='str', default='*'),
weekday=dict(type='str', default='*', aliases=['dow']),
special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
disabled=dict(type='bool', default=False),
env=dict(type='bool', default=False),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['insertafter', 'insertbefore'],
],
)
name = module.params['name']
user = module.params['user']
job = module.params['job']
cron_file = module.params['cron_file']
state = module.params['state']
backup = module.params['backup']
minute = module.params['minute']
hour = module.params['hour']
day = module.params['day']
month = module.params['month']
weekday = module.params['weekday']
special_time = module.params['special_time']
disabled = module.params['disabled']
env = module.params['env']
insertafter = module.params['insertafter']
insertbefore = module.params['insertbefore']
do_install = state == 'present'
changed = False
res_args = dict()
warnings = list()
if cron_file:
if cron_file == '/etc/crontab':
module.fail_json(msg="Will not manage /etc/crontab via cron_file, see documentation.")
cron_file_basename = os.path.basename(cron_file)
if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
' solely of upper- and lower-case letters, digits, underscores, and hyphens')
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
crontab = CronTab(module, user, cron_file)
module.debug('cron instantiated - name: "%s"' % name)
if module._diff:
diff = dict()
diff['before'] = crontab.n_existing
if crontab.cron_file:
diff['before_header'] = crontab.cron_file
else:
if crontab.user:
diff['before_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['before_header'] = 'crontab'
# --- user input validation ---
if special_time and \
any(x != '*' for x in [minute, hour, day, month, weekday]):
module.fail_json(msg="You must specify time and date fields or special time.")
# cannot support special_time on solaris
if special_time and platform.system() == 'SunOS':
module.fail_json(msg="Solaris does not support special_time=... or @reboot")
if do_install:
if cron_file and not user:
module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
if job is None:
module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
if (insertafter or insertbefore) and not env:
module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
# if requested make a backup before making a change
if backup and not module.check_mode:
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
if env:
if ' ' in name:
module.fail_json(msg="Invalid name for environment variable")
decl = '%s="%s"' % (name, job)
old_decl = crontab.find_env(name)
if do_install:
if len(old_decl) == 0:
crontab.add_env(decl, insertafter, insertbefore)
changed = True
if len(old_decl) > 0 and old_decl[1] != decl:
crontab.update_env(name, decl)
changed = True
else:
if len(old_decl) > 0:
crontab.remove_env(name)
changed = True
else:
if do_install:
for char in ['\r', '\n']:
if char in job.strip('\r\n'):
warnings.append('Job should not contain line breaks')
break
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
old_job = crontab.find_job(name, job)
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
if len(old_job) > 2:
crontab.update_job(name, job)
changed = True
else:
old_job = crontab.find_job(name)
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
if crontab.cron_file and crontab.is_empty():
if module._diff:
diff['after'] = ''
diff['after_header'] = '/dev/null'
else:
diff = dict()
if module.check_mode:
changed = os.path.isfile(crontab.cron_file)
else:
changed = crontab.remove_job_file()
module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
# no changes to env/job, but existing crontab needs a terminating newline
if not changed and crontab.n_existing != '':
if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
changed = True
res_args = dict(
jobs=crontab.get_jobnames(),
envs=crontab.get_envnames(),
warnings=warnings,
changed=changed
)
if changed:
if not module.check_mode:
crontab.write()
if module._diff:
diff['after'] = crontab.render()
if crontab.cron_file:
diff['after_header'] = crontab.cron_file
else:
if crontab.user:
diff['after_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['after_header'] = 'crontab'
res_args['diff'] = diff
# retain the backup only if crontab or cron file have changed
if backup and not module.check_mode:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
# --- should never get here
module.exit_json(msg="Unable to execute cron task.")
if __name__ == '__main__':
main()
| 26,411 | Python | .py | 658 | 30.952888 | 125 | 0.586375 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,141 | meta.py | ansible_ansible/lib/ansible/modules/meta.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, a Red Hat company
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
module: meta
short_description: Execute Ansible 'actions'
version_added: '1.2'
description:
- Meta tasks are a special kind of task which can influence Ansible internal execution or state.
- Meta tasks can be used anywhere within your playbook.
- This module is also supported for Windows targets.
options:
free_form:
description:
- This module takes a free form command, as a string. There is not an actual option named "free form". See the examples!
- V(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain
points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays).
- V(refresh_inventory) (added in Ansible 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be
re-executed. If the dynamic inventory script is using a cache, Ansible cannot know this and has no way of refreshing it (you can disable the cache
or, if available for your specific inventory datasource (e.g. aws), you can use an inventory plugin instead of an inventory script).
This is mainly useful when additional hosts are created and users wish to use them instead of using the M(ansible.builtin.add_host) module.
- V(noop) (added in Ansible 2.0) This literally does 'nothing'. It is mainly used internally and not recommended for general use.
- V(clear_facts) (added in Ansible 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared,
including the fact cache.
- V(clear_host_errors) (added in Ansible 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts.
- V(end_play) (added in Ansible 2.2) causes the play to end without failing the host(s). Note that this affects all hosts.
- V(reset_connection) (added in Ansible 2.3) interrupts a persistent connection (i.e. ssh + control persist).
- V(end_host) (added in Ansible 2.8) is a per-host variation of V(end_play). Causes the play to end for the current host without failing it.
- V(end_batch) (added in Ansible 2.12) causes the current batch (see C(serial)) to end without failing the host(s).
Note that with C(serial=0) or undefined this behaves the same as V(end_play).
- V(end_role) (added in Ansible 2.18) causes the currently executing role to end without failing the host(s).
Effectively all tasks from within a role after V(end_role) is executed are ignored. Since handlers live in a global,
play-wide scope, all handlers added via the role are unaffected and are still executed if notified. It is an error
to call V(end_role) from outside of a role or from a handler. Note that V(end_role) has no effect on
the parent roles or on roles that depend (via dependencies in meta/main.yml) on a role executing V(end_role).
choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch, end_role ]
required: true
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
attributes:
action:
support: none
bypass_host_loop:
details: Some of the subactions ignore the host loop, see the description above for each specific action for the exceptions
support: partial
bypass_task_loop:
details: Most of the subactions ignore the task loop, see the description above for each specific action for the exceptions
support: partial
check_mode:
details: While these actions don't modify the targets directly they do change possible states of the target within the run
support: partial
delegation:
support: none
diff_mode:
support: none
ignore_conditional:
details: Only some options support conditionals and when they do they act 'bypassing the host loop', taking the values from first available host
support: partial
connection:
details: Most options in this action do not use a connection, except V(reset_connection) which still does not connect to the remote
support: partial
until:
support: none
notes:
- V(clear_facts) will remove the persistent facts from M(ansible.builtin.set_fact) using O(ansible.builtin.set_fact#module:cacheable=True),
but not the current host variable it creates for the current run.
- Skipping M(ansible.builtin.meta) tasks with tags is not supported before Ansible 2.11.
seealso:
- module: ansible.builtin.assert
- module: ansible.builtin.fail
author:
- Ansible Core Team
"""
EXAMPLES = r"""
# Example showing flushing handlers on demand, not at end of play
- ansible.builtin.template:
src: new.j2
dest: /etc/config.txt
notify: myhandler
- name: Force all notified handlers to run at this point, not waiting for normal sync points
ansible.builtin.meta: flush_handlers
# Example showing how to refresh inventory during play
- name: Reload inventory, useful with dynamic inventories when play makes changes to the existing hosts
cloud_guest: # this is fake module
name: newhost
state: present
- name: Refresh inventory to ensure new instances exist in inventory
ansible.builtin.meta: refresh_inventory
# Example showing how to clear all existing facts of targeted hosts
- name: Clear gathered facts from all currently targeted hosts
ansible.builtin.meta: clear_facts
# Example showing how to continue using a failed target
- name: Bring host back to play after failure
ansible.builtin.copy:
src: file
dest: /etc/file
remote_user: imightnothavepermission
- ansible.builtin.meta: clear_host_errors
# Example showing how to reset an existing connection
- ansible.builtin.user:
name: '{{ ansible_user }}'
groups: input
- name: Reset ssh connection to allow user changes to affect 'current login user'
ansible.builtin.meta: reset_connection
# Example showing how to end the play for specific targets
- name: End the play for hosts that run CentOS 6
ansible.builtin.meta: end_host
when:
- ansible_distribution == 'CentOS'
- ansible_distribution_major_version == '6'
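# Illustrative example (not part of the original module docs); the variable names are hypothetical
- name: End the current serial batch once enough hosts have been prepared
  ansible.builtin.meta: end_batch
  when: prepared_hosts | int >= required_hosts | int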
"""
| 6,620 | Python | .py | 115 | 52.756522 | 156 | 0.741026 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,142 | include_role.py | ansible_ansible/lib/ansible/modules/include_role.py |
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
author: Ansible Core Team (@ansible)
module: include_role
short_description: Load and execute a role
description:
- Dynamically loads and executes a specified role as a task.
- May be used only where Ansible tasks are allowed - inside C(pre_tasks), C(tasks), or C(post_tasks) play objects, or as a task inside a role.
- Task-level keywords, loops, and conditionals apply only to the C(include_role) statement itself.
- To apply keywords to the tasks within the role, pass them using the O(apply) option or use M(ansible.builtin.import_role) instead.
- Ignores some keywords, like C(until) and C(retries).
- This module is also supported for Windows targets.
- Does not work in handlers.
version_added: "2.2"
options:
apply:
description:
- Accepts a hash of task keywords (for example C(tags), C(become)) that will be applied to all tasks within the included role.
version_added: '2.7'
name:
description:
- The name of the role to be executed.
type: str
required: True
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
type: str
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
type: str
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
type: str
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: yes
public:
description:
- This option dictates whether the role's C(vars) and C(defaults) are exposed to the play. If set to V(true)
the variables will be available to tasks following the C(include_role) task. This functionality differs from
standard variable exposure for roles listed under the C(roles) header or M(ansible.builtin.import_role) as they are exposed
to the play at playbook parsing time, and available to earlier roles and tasks as well.
type: bool
default: no
version_added: '2.7'
handlers_from:
description:
- File to load from a role's C(handlers/) directory.
type: str
default: main
version_added: '2.8'
rolespec_validate:
description:
- Perform role argument spec validation if an argument spec is defined.
type: bool
default: yes
version_added: '2.11'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
- action_core.include
attributes:
check_mode:
support: full
diff_mode:
support: none
notes:
- Handlers are made available to the whole play.
- After Ansible 2.4, you can use M(ansible.builtin.import_role) for B(static) behaviour and this action for B(dynamic) behaviour.
seealso:
- module: ansible.builtin.import_playbook
- module: ansible.builtin.import_role
- module: ansible.builtin.import_tasks
- module: ansible.builtin.include_tasks
- ref: playbooks_reuse
description: More information related to including and importing playbooks, roles and tasks.
"""
EXAMPLES = r"""
- ansible.builtin.include_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
ansible.builtin.include_role:
name: myrole
tasks_from: other
- name: Pass variables to role
ansible.builtin.include_role:
name: myrole
vars:
rolevar1: value from task
- name: Use role in loop
ansible.builtin.include_role:
name: '{{ roleinputvar }}'
loop:
- '{{ roleinput1 }}'
- '{{ roleinput2 }}'
loop_control:
loop_var: roleinputvar
- name: Conditional role
ansible.builtin.include_role:
name: myrole
when: not idontwanttorun
- name: Apply tags to tasks within included file
ansible.builtin.include_role:
name: install
apply:
tags:
- install
tags:
- always
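# Illustrative example (not part of the original module docs): expose the role's vars and defaults
- name: Include role and make its vars/defaults visible to subsequent tasks
  ansible.builtin.include_role:
    name: myrole
    public: true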
"""
RETURN = r"""
# This module does not return anything except tasks to execute.
"""
| 4,223 | Python | .py | 127 | 29.314961 | 144 | 0.717258 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,143 | systemd_service.py | ansible_ansible/lib/ansible/modules/systemd_service.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: systemd_service
author:
- Ansible Core Team
version_added: "2.2"
short_description: Manage systemd units
description:
- Controls systemd units (services, timers, and so on) on remote hosts.
- M(ansible.builtin.systemd) is renamed to M(ansible.builtin.systemd_service) to better reflect the scope of the module.
M(ansible.builtin.systemd) is kept as an alias for backward compatibility.
options:
name:
description:
- Name of the unit. This parameter takes the name of exactly one unit to work with.
- When no extension is given, C(.service) is implied, as systemd itself does.
- When using in a chroot environment you always need to specify the name of the unit with the extension. For example, C(crond.service).
type: str
aliases: [ service, unit ]
state:
description:
- V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
V(restarted) will always bounce the unit.
V(reloaded) will always reload and if the service is not running at the moment of the reload, it is started.
- If set, requires O(name).
type: str
choices: [ reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the unit should start on boot. At least one of O(state) and O(enabled) is required.
- If set, requires O(name).
type: bool
force:
description:
- Whether to override existing symlinks.
type: bool
version_added: 2.6
masked:
description:
- Whether the unit should be masked or not. A masked unit is impossible to start.
- If set, requires O(name).
type: bool
daemon_reload:
description:
- Run C(daemon-reload) before doing any other operations, to make sure systemd has read any changes.
- When set to V(true), runs C(daemon-reload) even if the module does not start or stop anything.
type: bool
default: no
aliases: [ daemon-reload ]
daemon_reexec:
description:
- Run the C(daemon-reexec) command before doing any other operations; the systemd manager will serialize its state.
type: bool
default: no
aliases: [ daemon-reexec ]
version_added: "2.8"
scope:
description:
- Run C(systemctl) within a given service manager scope, either as the default system scope V(system),
the current user's scope V(user), or the scope of all users V(global).
- "For systemd to work with V(user), the executing user must have its own instance of dbus started and accessible (systemd requirement)."
- "The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
- The user must have access, normally given via setting the C(XDG_RUNTIME_DIR) variable, see the example below.
type: str
choices: [ system, user, global ]
default: system
version_added: "2.7"
no_block:
description:
- Do not synchronously wait for the requested operation to finish.
Enqueued job will continue without Ansible blocking on its completion.
type: bool
default: no
version_added: "2.3"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
notes:
- O(state), O(enabled), and O(masked) require O(name).
- Before Ansible 2.4, O(name) was always required.
- Globs are not supported in name, for example C(postgres*.service).
- The service names might vary by specific OS/distribution.
- The order of execution when having multiple properties is to first enable/disable, then mask/unmask and then deal with the service state.
It has been reported that C(systemctl) can behave differently depending on the order of operations if you do the same manually.
requirements:
- A system managed by systemd.
"""
EXAMPLES = """
- name: Make sure a service unit is running
ansible.builtin.systemd_service:
state: started
name: httpd
- name: Stop service cron on debian, if running
ansible.builtin.systemd_service:
name: cron
state: stopped
- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
ansible.builtin.systemd_service:
state: restarted
daemon_reload: true
name: crond
- name: Reload service httpd, in all cases
ansible.builtin.systemd_service:
name: httpd.service
state: reloaded
- name: Enable service httpd and ensure it is not masked
ansible.builtin.systemd_service:
name: httpd
enabled: true
masked: no
- name: Enable a timer unit for dnf-automatic
ansible.builtin.systemd_service:
name: dnf-automatic.timer
state: started
enabled: true
- name: Just force systemd to reread configs (2.4 and above)
ansible.builtin.systemd_service:
daemon_reload: true
- name: Just force systemd to re-execute itself (2.8 and above)
ansible.builtin.systemd_service:
daemon_reexec: true
- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
ansible.builtin.systemd_service:
name: myservice
state: started
scope: user
environment:
XDG_RUNTIME_DIR: "/run/user/{{ myuid }}"
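# Illustrative example (not part of the original module docs); the unit name is hypothetical
- name: Enqueue a restart of a slow service without blocking on its completion
  ansible.builtin.systemd_service:
    name: myslowservice
    state: restarted
    no_block: true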
"""
RETURN = """
status:
description: A dictionary with the key=value pairs returned from C(systemctl show).
returned: success
type: dict
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
""" # NOQA
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.system.chroot import is_chroot
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils.common.text.converters import to_native
def is_running_service(service_status):
return service_status['ActiveState'] in ('active', 'activating')
def is_deactivating_service(service_status):
return service_status['ActiveState'] == 'deactivating'
def request_was_ignored(out):
return '=' not in out and ('ignoring request' in out or 'ignoring command' in out)
def parse_systemctl_show(lines):
# The output of 'systemctl show' can contain values that span multiple lines. At first glance it
# appears that such values are always surrounded by {}, so the previous version of this code
# assumed that any value starting with { was a multi-line value; it would then consume lines
# until it saw a line that ended with }. However, it is possible to have a single-line value
# that starts with { but does not end with } (this could happen in the value for Description=,
# for example), and the previous version of this code would then consume all remaining lines as
# part of that value. Cryptically, this would lead to Ansible reporting that the service file
# couldn't be found.
#
# To avoid this issue, the following code only accepts multi-line values for keys whose names
# start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to
# span multiple lines.
parsed = {}
multival = []
k = None
for line in lines:
if k is None:
if '=' in line:
k, v = line.split('=', 1)
if k.startswith('Exec') and v.lstrip().startswith('{'):
if not v.rstrip().endswith('}'):
multival.append(v)
continue
parsed[k] = v.strip()
k = None
else:
multival.append(line)
if line.rstrip().endswith('}'):
parsed[k] = '\n'.join(multival).strip()
multival = []
k = None
return parsed
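# Illustrative input/output (comment only):
# parse_systemctl_show(['Id=crond.service', 'ExecReload={ path=/bin/kill ;', 'argv[]=/bin/kill -HUP $MAINPID }'])
# -> {'Id': 'crond.service', 'ExecReload': '{ path=/bin/kill ;\nargv[]=/bin/kill -HUP $MAINPID }'}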
# ===========================================
# Main control flow
def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', aliases=['service', 'unit']),
state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
enabled=dict(type='bool'),
force=dict(type='bool'),
masked=dict(type='bool'),
daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
no_block=dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
required_by=dict(
state=('name', ),
enabled=('name', ),
masked=('name', ),
),
)
unit = module.params['name']
if unit is not None:
for globpattern in (r"*", r"?", r"["):
if globpattern in unit:
module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
systemctl = module.get_bin_path('systemctl', True)
if os.getenv('XDG_RUNTIME_DIR') is None:
os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
# Set CLI options depending on params
# if scope is 'system' or None, we can ignore as there is no extra switch.
# The other choices match the corresponding switch
if module.params['scope'] != 'system':
systemctl += " --%s" % module.params['scope']
if module.params['no_block']:
systemctl += " --no-block"
if module.params['force']:
systemctl += " --force"
rc = 0
out = err = ''
result = dict(
name=unit,
changed=False,
status=dict(),
)
# Run daemon-reload first, if requested
if module.params['daemon_reload'] and not module.check_mode:
(rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
if rc != 0:
if is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
module.warn('daemon-reload failed, but target is a chroot or systemd is offline. Continuing. Error was: %d / %s' % (rc, err))
else:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
# Run daemon-reexec
if module.params['daemon_reexec'] and not module.check_mode:
(rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
if rc != 0:
if is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
module.warn('daemon-reexec failed, but target is a chroot or systemd is offline. Continuing. Error was: %d / %s' % (rc, err))
else:
module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
if unit:
found = False
is_initd = sysv_exists(unit)
is_systemd = False
# check service data, cannot error out on rc as it changes across versions, assume not found
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
# load return of systemctl show into dictionary for easy access and return
if out:
result['status'] = parse_systemctl_show(to_native(out).split('\n'))
is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
# Check for loading error
if is_systemd and not is_masked and 'LoadError' in result['status']:
module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
# Workaround for https://github.com/ansible/ansible/issues/71528
elif err and rc == 1 and 'Failed to parse bus message' in err:
result['status'] = parse_systemctl_show(to_native(out).split('\n'))
unit_base, sep, suffix = unit.partition('@')
unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
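            # e.g. (hypothetical) for a templated unit 'getty@tty1.service' this yields
            # unit_search 'getty@', so list-unit-files is queried for 'getty@*'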
(rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
is_systemd = unit_search in out
(rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
result['status']['ActiveState'] = out.rstrip('\n')
else:
# list taken from man systemctl(1) for systemd 244
valid_enabled_states = [
"enabled",
"enabled-runtime",
"linked",
"linked-runtime",
"masked",
"masked-runtime",
"static",
"indirect",
"disabled",
"generated",
"transient"]
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
if out.strip() in valid_enabled_states:
is_systemd = True
else:
# fallback list-unit-files as show does not work on some systems (chroot)
# not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
(rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
if rc == 0:
is_systemd = True
else:
# Check for systemctl command
module.run_command(systemctl, check_rc=True)
# Does service exist?
found = is_systemd or is_initd
if is_initd and not is_systemd:
module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
        # mask/unmask the service if requested; this can operate on services before they are installed
if module.params['masked'] is not None:
# state is not masked unless systemd affirms otherwise
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
masked = out.strip() == "masked"
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
                        # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
fail_if_missing(module, found, unit, msg='host')
# here if service was not missing, but failed for other reasons
module.fail_json(msg=f"Failed to {action} the service ({unit}): {err.strip()}")
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
fail_if_missing(module, found, unit, msg='host')
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s' -l" % (systemctl, unit))
            # check systemctl result or if it is an init script
if rc == 0:
# https://www.freedesktop.org/software/systemd/man/systemctl.html#is-enabled%20UNIT%E2%80%A6
if out.rstrip() in (
"enabled-runtime", # transiently enabled but we're trying to set a permanent enabled
"indirect", # We've been asked to enable this unit so do so despite possible reasons
# that systemctl may have for thinking it's enabled already.
"alias"): # Let systemd handle the alias as we can't be sure what's needed.
enabled = False
else:
enabled = True
elif rc == 1:
                # if not a user or global scope service, and both an init script and a unit file exist,
                # stdout should contain enabled/disabled; otherwise rely on the rc-based sysv check
if module.params['scope'] == 'system' and \
is_initd and \
not out.strip().endswith('disabled') and \
sysv_is_enabled(unit):
enabled = True
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
result['enabled'] = not enabled
# set service state if requested
if module.params['state'] is not None:
fail_if_missing(module, found, unit, msg="host")
# default to desired state
result['state'] = module.params['state']
# What is current service state?
if 'ActiveState' in result['status']:
action = None
if module.params['state'] == 'started':
if not is_running_service(result['status']):
action = 'start'
elif module.params['state'] == 'stopped':
if is_running_service(result['status']) or is_deactivating_service(result['status']):
action = 'stop'
else:
if not is_running_service(result['status']):
action = 'start'
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
if action:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
# check for chroot
elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
else:
# this should not happen?
module.fail_json(msg="Service is in unknown state", status=result['status'])
module.exit_json(**result)
if __name__ == '__main__':
main()
| 24,990 | Python | .py | 520 | 36.792308 | 182 | 0.581814 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,144 | hostname.py | ansible_ansible/lib/ansible/modules/hostname.py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: hostname
author:
- Adrian Likins (@alikins)
- Hideki Saito (@saito-hideki)
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname. Supports most OSs/Distributions including those using C(systemd).
- Windows, HP-UX, and AIX are not currently supported.
notes:
- This module does B(NOT) modify C(/etc/hosts). You need to modify it yourself using other modules such as M(ansible.builtin.template)
or M(ansible.builtin.replace).
- On macOS, this module uses C(scutil) to set C(HostName), C(ComputerName), and C(LocalHostName). Since C(LocalHostName)
cannot contain spaces or most special characters, this module will replace characters when setting C(LocalHostName).
options:
name:
description:
- Name of the host.
- If the value is a fully qualified domain name that does not resolve from the given host,
this will cause the module to hang for a few seconds while waiting for the name resolution attempt to timeout.
type: str
required: true
use:
description:
- Which strategy to use to update the hostname.
- If not set we try to autodetect, but this can be problematic, particularly with containers as they can present misleading information.
- Note that V(systemd) should be specified for RHEL/EL/CentOS 7+. Older distributions should use V(redhat).
choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd']
type: str
version_added: '2.9'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
attributes:
check_mode:
support: full
diff_mode:
support: full
facts:
support: full
platform:
platforms: posix
"""
EXAMPLES = """
- name: Set a hostname
ansible.builtin.hostname:
name: web01
- name: Set a hostname specifying strategy
ansible.builtin.hostname:
name: web01
use: systemd
"""
import os
import platform
import socket
import traceback
import ansible.module_utils.compat.typing as t
from ansible.module_utils.basic import (
AnsibleModule,
get_distribution,
get_distribution_version,
)
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.utils import get_file_lines, get_file_content
from ansible.module_utils.common.text.converters import to_native, to_text
STRATS = {
'alpine': 'Alpine',
'debian': 'Systemd',
'freebsd': 'FreeBSD',
'generic': 'Base',
'macos': 'Darwin',
'macosx': 'Darwin',
'darwin': 'Darwin',
'openbsd': 'OpenBSD',
'openrc': 'OpenRC',
'redhat': 'RedHat',
'sles': 'SLES',
'solaris': 'Solaris',
'systemd': 'Systemd',
}
class BaseStrategy(object):
def __init__(self, module):
self.module = module
self.changed = False
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
self.update_permanent_hostname()
return self.changed
def update_current_hostname(self):
name = self.module.params['name']
current_name = self.get_current_hostname()
if current_name != name:
if not self.module.check_mode:
self.set_current_hostname(name)
self.changed = True
def update_permanent_hostname(self):
name = self.module.params['name']
permanent_name = self.get_permanent_hostname()
if permanent_name != name:
if not self.module.check_mode:
self.set_permanent_hostname(name)
self.changed = True
def get_current_hostname(self):
return self.get_permanent_hostname()
def set_current_hostname(self, name):
pass
def get_permanent_hostname(self):
raise NotImplementedError
def set_permanent_hostname(self, name):
raise NotImplementedError
class UnimplementedStrategy(BaseStrategy):
def update_current_and_permanent_hostname(self):
self.unimplemented_error()
def update_current_hostname(self):
self.unimplemented_error()
def update_permanent_hostname(self):
self.unimplemented_error()
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
system = platform.system()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (system, distribution)
else:
msg_platform = system
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class CommandStrategy(BaseStrategy):
COMMAND = 'hostname'
def __init__(self, module):
super(CommandStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
return 'UNKNOWN'
def set_permanent_hostname(self, name):
pass
class FileStrategy(BaseStrategy):
FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.FILE):
return ''
try:
return get_file_content(self.FILE, default='', strip=True)
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
with open(self.FILE, 'w+') as f:
f.write("%s\n" % name)
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
class SLESStrategy(FileStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
FILE = '/etc/HOSTNAME'
class RedHatStrategy(BaseStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
for line in get_file_lines(self.NETWORK_FILE):
line = to_native(line).strip()
if line.startswith('HOSTNAME'):
                    k, v = line.split('=', 1)
return v.strip()
            self.module.fail_json(
                msg="Unable to locate HOSTNAME entry in %s" % self.NETWORK_FILE
            )
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
lines = []
found = False
content = get_file_content(self.NETWORK_FILE, strip=False) or ""
for line in content.splitlines(True):
line = to_native(line)
if line.strip().startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
if not found:
lines.append("HOSTNAME=%s\n" % name)
with open(self.NETWORK_FILE, 'w+') as f:
f.writelines(lines)
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
class AlpineStrategy(FileStrategy):
"""
This is a Alpine Linux Hostname manipulation strategy class - it edits
the /etc/hostname file then run hostname -F /etc/hostname.
"""
FILE = '/etc/hostname'
COMMAND = 'hostname'
def set_current_hostname(self, name):
super(AlpineStrategy, self).set_current_hostname(name)
hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
cmd = [hostname_cmd, '-F', self.FILE]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class SystemdStrategy(BaseStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
COMMAND = "hostnamectl"
def __init__(self, module):
super(SystemdStrategy, self).__init__(module)
self.hostnamectl_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostnamectl_cmd, '--transient', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = [self.hostnamectl_cmd, '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
cmd = [self.hostnamectl_cmd, '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = [self.hostnamectl_cmd, '--pretty', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def update_current_and_permanent_hostname(self):
# Must set the permanent hostname prior to current to avoid NetworkManager complaints
# about setting the hostname outside of NetworkManager
self.update_permanent_hostname()
self.update_current_hostname()
return self.changed
class OpenRCStrategy(BaseStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.FILE):
return ''
try:
for line in get_file_lines(self.FILE):
line = line.strip()
if line.startswith('hostname='):
                    return line[len('hostname='):].strip('"')
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
lines = [x.strip() for x in get_file_lines(self.FILE)]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
with open(self.FILE, 'w') as f:
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
class OpenBSDStrategy(FileStrategy):
"""
This is a OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file for the permanent hostname and executes hostname
command for the current hostname.
"""
FILE = '/etc/myname'
COMMAND = "hostname"
def __init__(self, module):
super(OpenBSDStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class SolarisStrategy(BaseStrategy):
"""
This is a Solaris11 or later Hostname manipulation strategy class - it
execute hostname command.
"""
COMMAND = "hostname"
def __init__(self, module):
super(SolarisStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class FreeBSDStrategy(BaseStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
FILE = '/etc/rc.conf.d/hostname'
COMMAND = "hostname"
def __init__(self, module):
super(FreeBSDStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
if not os.path.isfile(self.FILE):
return ''
try:
for line in get_file_lines(self.FILE):
line = line.strip()
if line.startswith('hostname='):
                    return line[len('hostname='):].strip('"')
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
if os.path.isfile(self.FILE):
lines = [x.strip() for x in get_file_lines(self.FILE)]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
else:
lines = ['hostname="%s"' % name]
with open(self.FILE, 'w') as f:
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
class DarwinStrategy(BaseStrategy):
"""
This is a macOS hostname manipulation strategy class. It uses
/usr/sbin/scutil to set ComputerName, HostName, and LocalHostName.
HostName corresponds to what most platforms consider to be hostname.
It controls the name used on the command line and SSH.
However, macOS also has LocalHostName and ComputerName settings.
LocalHostName controls the Bonjour/ZeroConf name, used by services
like AirDrop. This class implements a method, _scrub_hostname(), that mimics
the transformations macOS makes on hostnames when entered in the Sharing
preference pane. It replaces spaces with dashes and removes all special
characters.
ComputerName is the name used for user-facing GUI services, like the
System Preferences/Sharing pane and when users connect to the Mac over the network.
"""
def __init__(self, module):
super(DarwinStrategy, self).__init__(module)
self.scutil = self.module.get_bin_path('scutil', True)
self.name_types = ('HostName', 'ComputerName', 'LocalHostName')
self.scrubbed_name = self._scrub_hostname(self.module.params['name'])
def _scrub_hostname(self, name):
"""
LocalHostName only accepts valid DNS characters while HostName and ComputerName
accept a much wider range of characters. This function aims to mimic how macOS
translates a friendly name to the LocalHostName.
"""
# Replace all these characters with a single dash
name = to_text(name)
replace_chars = u'\'"~`!@#$%^&*(){}[]/=?+\\|-_ '
delete_chars = u".'"
table = str.maketrans(replace_chars, '-' * len(replace_chars), delete_chars)
name = name.translate(table)
# Replace multiple dashes with a single dash
while '-' * 2 in name:
            name = name.replace('-' * 2, '-')
name = name.rstrip('-')
return name
def get_current_hostname(self):
cmd = [self.scutil, '--get', 'HostName']
rc, out, err = self.module.run_command(cmd)
if rc != 0 and 'HostName: not set' not in err:
self.module.fail_json(msg="Failed to get current hostname rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def get_permanent_hostname(self):
cmd = [self.scutil, '--get', 'ComputerName']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Failed to get permanent hostname rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
for hostname_type in self.name_types:
cmd = [self.scutil, '--set', hostname_type]
if hostname_type == 'LocalHostName':
cmd.append(to_native(self.scrubbed_name))
else:
cmd.append(to_native(name))
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Failed to set {3} to '{2}': {0} {1}".format(to_native(out), to_native(err), to_native(name), hostname_type))
def set_current_hostname(self, name):
pass
def update_current_hostname(self):
pass
def update_permanent_hostname(self):
name = self.module.params['name']
# Get all the current host name values in the order of self.name_types
all_names = tuple(self.module.run_command([self.scutil, '--get', name_type])[1].strip() for name_type in self.name_types)
# Get the expected host name values based on the order in self.name_types
expected_names = tuple(self.scrubbed_name if n == 'LocalHostName' else name for n in self.name_types)
# Ensure all three names are updated
if all_names != expected_names:
if not self.module.check_mode:
self.set_permanent_hostname(name)
self.changed = True
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
    A subclass may wish to assign a different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None # type: str | None
strategy_class = UnimplementedStrategy # type: t.Type[BaseStrategy]
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(Hostname)
return super(cls, new_cls).__new__(new_cls)
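    # get_platform_subclass() picks the most specific subclass whose platform
    # and distribution match the running system; e.g. (hypothetically) on an
    # Ubuntu host, Hostname(module) returns an UbuntuHostname instance.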
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.use = module.params['use']
if self.use is not None:
strat = globals()['%sStrategy' % STRATS[self.use]]
self.strategy = strat(module)
elif platform.system() == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
# This is Linux and systemd is active
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Sles'
try:
distribution_version = get_distribution_version()
        # cast to float may raise ValueError on non-SLES; we use float for a little more safety over int
if distribution_version and 10 <= float(distribution_version) <= 12:
strategy_class = SLESStrategy # type: t.Type[BaseStrategy]
else:
raise ValueError()
except ValueError:
strategy_class = UnimplementedStrategy
class RHELHostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class AnolisOSHostname(Hostname):
platform = 'Linux'
distribution = 'Anolis'
strategy_class = RedHatStrategy
class CloudlinuxserverHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinuxserver'
strategy_class = RedHatStrategy
class CloudlinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinux'
strategy_class = RedHatStrategy
class AlinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alinux'
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle'
strategy_class = RedHatStrategy
class VirtuozzoLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Virtuozzo'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = FileStrategy
class KylinHostname(Hostname):
platform = 'Linux'
distribution = 'Kylin'
strategy_class = FileStrategy
class CumulusHostname(Hostname):
platform = 'Linux'
distribution = 'Cumulus-linux'
strategy_class = FileStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = FileStrategy
class ParrotHostname(Hostname):
platform = 'Linux'
distribution = 'Parrot'
strategy_class = FileStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = FileStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = FileStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = FileStrategy
class DevuanHostname(Hostname):
platform = 'Linux'
distribution = 'Devuan'
strategy_class = FileStrategy
class RaspbianHostname(Hostname):
platform = 'Linux'
distribution = 'Raspbian'
strategy_class = FileStrategy
class UosHostname(Hostname):
platform = 'Linux'
distribution = 'Uos'
strategy_class = FileStrategy
class DeepinHostname(Hostname):
platform = 'Linux'
distribution = 'Deepin'
strategy_class = FileStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class AlpineLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alpine'
strategy_class = AlpineStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NetBSDHostname(Hostname):
platform = 'NetBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NeonHostname(Hostname):
platform = 'Linux'
distribution = 'Neon'
strategy_class = FileStrategy
class DarwinHostname(Hostname):
platform = 'Darwin'
distribution = None
strategy_class = DarwinStrategy
class VoidLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Void'
strategy_class = FileStrategy
class PopHostname(Hostname):
platform = 'Linux'
distribution = 'Pop'
strategy_class = FileStrategy
class EurolinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Eurolinux'
strategy_class = RedHatStrategy
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
use=dict(type='str', choices=list(STRATS.keys()))
),
supports_check_mode=True,
)
hostname = Hostname(module)
name = module.params['name']
current_hostname = hostname.get_current_hostname()
permanent_hostname = hostname.get_permanent_hostname()
changed = hostname.update_current_and_permanent_hostname()
if name != current_hostname:
name_before = current_hostname
else:
name_before = permanent_hostname
# NOTE: socket.getfqdn() calls gethostbyaddr(socket.gethostname()), which can be
# slow to return if the name does not resolve correctly.
kw = dict(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if changed:
kw['diff'] = {'after': 'hostname = ' + name + '\n',
'before': 'hostname = ' + name_before + '\n'}
module.exit_json(**kw)
if __name__ == '__main__':
main()
| 28,729 | Python | .py | 700 | 33.114286 | 151 | 0.639157 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,145 | rpm_key.py | ansible_ansible/lib/ansible/modules/rpm_key.py |
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# Copyright: (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: rpm_key
author:
- Hector Acosta (@hacosta) <hector.acosta@gazzang.com>
short_description: Adds or removes a gpg key from the rpm db
description:
- Adds or removes C(rpm --import) a gpg key to your rpm database.
version_added: "1.3"
options:
key:
description:
- Key that will be modified. Can be a url, a file on the managed node, or a keyid if the key
already exists in the database.
type: str
required: true
state:
description:
- If the key will be imported or removed from the rpm db.
type: str
default: present
choices: [ absent, present ]
validate_certs:
description:
- If V(false) and the O(key) is a url starting with V(https), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
fingerprint:
description:
- The long-form fingerprint of the key being imported.
- This will be used to verify the specified key.
type: list
elements: str
version_added: 2.9
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: rhel
"""
EXAMPLES = """
- name: Import a key from a url
ansible.builtin.rpm_key:
state: present
key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
- name: Import a key from a file
ansible.builtin.rpm_key:
state: present
key: /path/to/key.gpg
- name: Ensure a key is not present in the db
ansible.builtin.rpm_key:
state: absent
key: DEADB33F
- name: Verify the key, using a fingerprint, before import
ansible.builtin.rpm_key:
key: /path/to/RPM-GPG-KEY.dag.txt
fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
- name: Verify the key, using multiple fingerprints, before import
ansible.builtin.rpm_key:
key: /path/to/RPM-GPG-KEY.dag.txt
fingerprint:
- EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
- 19B7 913E 6284 8E3F 4D78 D6B4 ECD9 1AB2 2EB6 8D86
"""
RETURN = r"""#"""
import re
import os.path
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.common.text.converters import to_native
def is_pubkey(string):
"""Verifies if string is a pubkey"""
pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
class RpmKey(object):
def __init__(self, module):
# If the key is a url, we need to check if it's present to be idempotent,
# to do that, we need to check the keyid, which we can get from the armor.
keyfile = None
should_cleanup_keyfile = False
self.module = module
self.rpm = self.module.get_bin_path('rpm', True)
state = module.params['state']
key = module.params['key']
fingerprint = module.params['fingerprint']
fingerprints = set()
if fingerprint:
if not isinstance(fingerprint, list):
fingerprint = [fingerprint]
fingerprints = set(f.replace(' ', '').upper() for f in fingerprint)
self.gpg = self.module.get_bin_path('gpg')
if not self.gpg:
self.gpg = self.module.get_bin_path('gpg2', required=True)
if '://' in key:
keyfile = self.fetch_key(key)
keyid = self.getkeyid(keyfile)
should_cleanup_keyfile = True
elif self.is_keyid(key):
keyid = key
elif os.path.isfile(key):
keyfile = key
keyid = self.getkeyid(keyfile)
else:
self.module.fail_json(msg="Not a valid key %s" % key)
keyid = self.normalize_keyid(keyid)
if state == 'present':
if self.is_key_imported(keyid):
module.exit_json(changed=False)
else:
if not keyfile:
self.module.fail_json(msg="When importing a key, a valid file must be given")
if fingerprints:
keyfile_fingerprints = self.getfingerprints(keyfile)
if not fingerprints.issubset(keyfile_fingerprints):
                        self.module.fail_json(
                            msg=("The specified fingerprints, '%s', "
                                 "do not match any key fingerprints in '%s'") % (fingerprints, keyfile_fingerprints)
                        )
self.import_key(keyfile)
if should_cleanup_keyfile:
self.module.cleanup(keyfile)
module.exit_json(changed=True)
else:
if self.is_key_imported(keyid):
self.drop_key(keyid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def fetch_key(self, url):
"""Downloads a key from url, returns a valid path to a gpg key"""
rsp, info = fetch_url(self.module, url)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
key = rsp.read()
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
self.module.add_cleanup_file(tmpname)
with os.fdopen(tmpfd, "w+b") as tmpfile:
tmpfile.write(key)
return tmpname
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase"""
ret = keyid.strip().upper()
if ret.startswith('0x'):
return ret[2:]
elif ret.startswith('0X'):
return ret[2:]
else:
return ret
def getkeyid(self, keyfile):
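        # gpg --with-colons emits records such as (hypothetical key):
        #   pub:-:4096:1:A20E52146B8D79E6:1133423252:::-:::scSC:
        # field 5 (index 4) carries the long key id.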
stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
for line in stdout.splitlines():
line = line.strip()
if line.startswith('pub:'):
return line.split(':')[4]
self.module.fail_json(msg="Unexpected gpg output")
def getfingerprints(self, keyfile):
stdout, stderr = self.execute_command([
self.gpg, '--no-tty', '--batch', '--with-colons',
'--fixed-list-mode', '--import', '--import-options', 'show-only',
'--dry-run', keyfile
])
fingerprints = set()
for line in stdout.splitlines():
line = line.strip()
if line.startswith('fpr:'):
# As mentioned here,
#
# https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob_plain;f=doc/DETAILS
#
# The description of the `fpr` field says
#
# "fpr :: Fingerprint (fingerprint is in field 10)"
#
fingerprints.add(line.split(':')[9])
if fingerprints:
return fingerprints
self.module.fail_json(msg="Unexpected gpg output")
def is_keyid(self, keystr):
"""Verifies if a key, as provided by the user is a keyid"""
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg=stderr)
return stdout, stderr
def is_key_imported(self, keyid):
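        # rpm stores imported keys as virtual 'gpg-pubkey' packages whose
        # %{description} holds the armored key; piping that back through gpg
        # recovers each key id for comparison against the requested one.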
cmd = self.rpm + ' -q gpg-pubkey'
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0: # No key is installed on system
return False
cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
stdout, stderr = self.execute_command(cmd)
for line in stdout.splitlines():
if keyid in line.split(':')[4]:
return True
return False
def import_key(self, keyfile):
if not self.module.check_mode:
self.execute_command([self.rpm, '--import', keyfile])
def drop_key(self, keyid):
if not self.module.check_mode:
self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
key=dict(type='str', required=True, no_log=False),
fingerprint=dict(type='list', elements='str'),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
RpmKey(module)
if __name__ == '__main__':
main()
| 9,312 | Python | .py | 229 | 31.703057 | 127 | 0.600265 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,146 | command.py | ansible_ansible/lib/ansible/modules/command.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: command
short_description: Execute commands on targets
version_added: historical
description:
- The M(ansible.builtin.command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes.
- The command(s) will not be
processed through the shell, so variables like C($HOSTNAME) and operations
like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
Use the M(ansible.builtin.shell) module if you need these features.
- To create C(command) tasks that are easier to read than the ones using space-delimited
arguments, pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task)
or use O(cmd) parameter.
- Either a free form command or O(cmd) parameter is required, see the examples.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.raw
attributes:
check_mode:
        details: while the command itself is arbitrary and cannot be subject to the check mode semantics, it adds O(creates)/O(removes) options as a workaround
support: partial
diff_mode:
support: none
platform:
support: full
platforms: posix
raw:
support: full
options:
expand_argument_vars:
description:
- Expands the arguments that are variables, for example C($HOME) will be expanded before being passed to the
command to run.
- Set to V(false) to disable expansion and treat the value as a literal argument.
type: bool
default: true
version_added: "2.16"
free_form:
description:
- The command module takes a free form string as a command to run.
- There is no actual parameter named C(free_form).
cmd:
type: str
description:
- The command to run.
argv:
type: list
elements: str
description:
- Passes the command as a list rather than a string.
- Use O(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
- Only the string (free form) or the list (argv) form can be provided, not both. One or the other must be provided.
version_added: "2.6"
creates:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file already exists, this step B(will not) be run.
- This is checked before O(removes) is checked.
removes:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file exists, this step B(will) be run.
- This is checked after O(creates) is checked.
version_added: "0.8"
chdir:
type: path
description:
- Change into this directory before running the command.
version_added: "0.6"
stdin:
description:
- Set the stdin of the command directly to the specified value.
type: str
version_added: "2.4"
stdin_add_newline:
type: bool
default: yes
description:
- If set to V(true), append a newline to stdin data.
version_added: "2.8"
strip_empty_ends:
description:
- Strip empty lines from the end of stdout/stderr in result.
version_added: "2.8"
type: bool
default: yes
notes:
- If you want to run a command through the shell (say you are using C(<), C(>), C(|), and so on),
you actually want the M(ansible.builtin.shell) module instead.
Parsing shell metacharacters can lead to unexpected commands being executed if quoting is not done correctly so it is more secure to
use the M(ansible.builtin.command) module when possible.
- O(creates), O(removes), and O(chdir) can be specified after the command.
For instance, if you only want to run a command if a certain file does not exist, use this.
- Check mode is supported when passing O(creates) or O(removes). If running in check mode and either of these are specified, the module will
check for the existence of the file and report the correct changed status. If these are not supplied, the task will be skipped.
- The O(ignore:executable) parameter is removed since version 2.4. If you have a need for this parameter, use the M(ansible.builtin.shell) module instead.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
- For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
- If the command returns non UTF-8 data, it must be encoded to avoid issues. This may necessitate using M(ansible.builtin.shell) so the output
can be piped through C(base64).
seealso:
- module: ansible.builtin.raw
- module: ansible.builtin.script
- module: ansible.builtin.shell
- module: ansible.windows.win_command
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
- name: Return motd to registered var
ansible.builtin.command: cat /etc/motd
register: mymotd
# free-form (string) arguments, all arguments on one line
- name: Run command if /path/to/database does not exist (without 'args')
ansible.builtin.command: /usr/bin/make_database.sh db_user db_name creates=/path/to/database
# free-form (string) arguments, some arguments on separate lines with the 'args' keyword
# 'args' is a task keyword, passed at the same level as the module
- name: Run command if /path/to/database does not exist (with 'args' keyword)
ansible.builtin.command: /usr/bin/make_database.sh db_user db_name
args:
creates: /path/to/database
# 'cmd' is module parameter
- name: Run command if /path/to/database does not exist (with 'cmd' parameter)
ansible.builtin.command:
cmd: /usr/bin/make_database.sh db_user db_name
creates: /path/to/database
- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist
ansible.builtin.command: /usr/bin/make_database.sh db_user db_name
become: yes
become_user: db_owner
args:
chdir: somedir/
creates: /path/to/database
# argv (list) arguments, each argument on a separate line, 'args' keyword not necessary
# 'argv' is a parameter, indented one level from the module
- name: Use 'argv' to send a command as a list - leave 'command' empty
ansible.builtin.command:
argv:
- /usr/bin/make_database.sh
- Username with whitespace
- dbname with whitespace
creates: /path/to/database
- name: Run command using argv with mixed argument formats
ansible.builtin.command:
argv:
- /path/to/binary
- -v
- --debug
- --longopt
- value for longopt
- --other-longopt=value for other longopt
- positional
- name: Safely use templated variable to run command. Always use the quote filter to avoid injection issues
ansible.builtin.command: cat {{ myfile|quote }}
register: myoutput
"""
RETURN = r"""
msg:
description: changed
returned: always
type: bool
sample: True
start:
description: The command execution start time.
returned: always
type: str
sample: '2017-09-29 22:03:48.083128'
end:
description: The command execution end time.
returned: always
type: str
sample: '2017-09-29 22:03:48.084657'
delta:
description: The command execution delta time.
returned: always
type: str
sample: '0:00:00.001529'
stdout:
description: The command standard output.
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master …'
stderr:
description: The command standard error.
returned: always
type: str
sample: 'ls cannot access foo: No such file or directory'
cmd:
description: The command executed by the task.
returned: always
type: list
sample:
- echo
- hello
rc:
description: The command return code (0 means success).
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines.
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
stderr_lines:
description: The command standard error split in lines.
returned: always
type: list
sample: [u'ls cannot access foo: No such file or directory', u'ls …']
"""
import datetime
import glob
import os
import shlex
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text
from ansible.module_utils.common.collections import is_iterable
def main():
# the command module is the one ansible module that does not take key=value args
# hence don't copy this one if you are looking to build others!
# NOTE: ensure splitter.py is kept in sync for exceptions
module = AnsibleModule(
argument_spec=dict(
_raw_params=dict(),
_uses_shell=dict(type='bool', default=False),
argv=dict(type='list', elements='str'),
chdir=dict(type='path'),
executable=dict(),
expand_argument_vars=dict(type='bool', default=True),
creates=dict(type='path'),
removes=dict(type='path'),
# The default for this really comes from the action plugin
stdin=dict(required=False),
stdin_add_newline=dict(type='bool', default=True),
strip_empty_ends=dict(type='bool', default=True),
),
supports_check_mode=True,
)
shell = module.params['_uses_shell']
chdir = module.params['chdir']
executable = module.params['executable']
args = module.params['_raw_params']
argv = module.params['argv']
creates = module.params['creates']
removes = module.params['removes']
stdin = module.params['stdin']
stdin_add_newline = module.params['stdin_add_newline']
strip = module.params['strip_empty_ends']
expand_argument_vars = module.params['expand_argument_vars']
# we promised these in 'always' ( _lines get auto-added on action plugin)
r = {'changed': False, 'stdout': '', 'stderr': '', 'rc': None, 'cmd': None, 'start': None, 'end': None, 'delta': None, 'msg': ''}
if not shell and executable:
module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
executable = None
if (not args or args.strip() == '') and not argv:
r['rc'] = 256
r['msg'] = "no command given"
module.fail_json(**r)
if args and argv:
r['rc'] = 256
r['msg'] = "only command or argv can be given, not both"
module.fail_json(**r)
if not shell and args:
args = shlex.split(args)
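        # e.g. (hypothetical): "/usr/bin/make_database.sh db_user db_name"
        # -> ['/usr/bin/make_database.sh', 'db_user', 'db_name']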
args = args or argv
# All args must be strings
if is_iterable(args, include_strings=False):
args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
r['cmd'] = args
if chdir:
chdir = to_bytes(chdir, errors='surrogate_or_strict')
try:
os.chdir(chdir)
except (IOError, OSError) as e:
r['msg'] = 'Unable to change directory before execution: %s' % to_text(e)
module.fail_json(**r)
# check_mode partial support, since it only really works in checking creates/removes
if module.check_mode:
shoulda = "Would"
else:
shoulda = "Did"
# special skips for idempotence if file exists (assumes command creates)
if creates:
if glob.glob(creates):
r['msg'] = "%s not run command since '%s' exists" % (shoulda, creates)
r['stdout'] = "skipped, since %s exists" % creates # TODO: deprecate
r['rc'] = 0
# special skips for idempotence if file does not exist (assumes command removes)
if not r['msg'] and removes:
if not glob.glob(removes):
r['msg'] = "%s not run command since '%s' does not exist" % (shoulda, removes)
r['stdout'] = "skipped, since %s does not exist" % removes # TODO: deprecate
r['rc'] = 0
if r['msg']:
module.exit_json(**r)
r['changed'] = True
# actually executes command (or not ...)
if not module.check_mode:
r['start'] = datetime.datetime.now()
r['rc'], r['stdout'], r['stderr'] = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None,
data=stdin, binary_data=(not stdin_add_newline),
expand_user_and_vars=expand_argument_vars)
r['end'] = datetime.datetime.now()
else:
# this is partial check_mode support, since we end up skipping if we get here
r['rc'] = 0
r['msg'] = "Command would have run if not in check mode"
if creates is None and removes is None:
r['skipped'] = True
# skipped=True and changed=True are mutually exclusive
r['changed'] = False
# convert to text for jsonization and usability
if r['start'] is not None and r['end'] is not None:
# these are datetime objects, but need them as strings to pass back
r['delta'] = to_text(r['end'] - r['start'])
r['end'] = to_text(r['end'])
r['start'] = to_text(r['start'])
if strip:
r['stdout'] = to_text(r['stdout']).rstrip("\r\n")
r['stderr'] = to_text(r['stderr']).rstrip("\r\n")
if r['rc'] != 0:
r['msg'] = 'non-zero return code'
module.fail_json(**r)
module.exit_json(**r)
if __name__ == '__main__':
main()
| 13,962 | Python | .py | 335 | 35.946269 | 159 | 0.675282 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,147 | raw.py | ansible_ansible/lib/ansible/modules/raw.py |
# This is a virtual module that is entirely implemented server side
# Copyright: (c) 2012, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: raw
short_description: Executes a low-down and dirty command
version_added: historical
options:
free_form:
description:
- The raw module takes a free form command to run.
- There is no parameter actually named 'free form'; see the examples!
required: true
executable:
description:
- Change the shell used to execute the command. Should be an absolute path to the executable.
- When using privilege escalation (C(become)) a default shell will be assigned if one is not provided
as privilege escalation requires a shell.
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem.
- This is useful and should only be done in a few cases. A common
case is installing C(python) on a system without python installed by default.
Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(ansible.builtin.shell) or M(ansible.builtin.command) module is much more appropriate.
- Arguments given to C(raw) are run directly through the configured remote shell.
- Standard output, error output and return code are returned when
available.
- There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(ansible.builtin.script) module.
- This module is also supported for Windows targets.
- If the command returns non UTF-8 data, it must be encoded to avoid issues. One option is to pipe
the output through C(base64).
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.raw
attributes:
check_mode:
support: none
diff_mode:
support: none
platform:
details: This action is one of the few that requires no Python on the remote as it passes the command directly into the connection string
platforms: all
raw:
support: full
notes:
- "If using raw from a playbook, you may need to disable fact gathering
using C(gather_facts: no) if you're using C(raw) to bootstrap python
onto the machine."
- If you want to execute a command securely and predictably, it may be
better to use the M(ansible.builtin.command) or M(ansible.builtin.shell) modules instead.
- The C(environment) keyword does not work with raw normally, it requires a shell
which means it only works if C(executable) is set or using the module
with privilege escalation (C(become)).
seealso:
- module: ansible.builtin.command
- module: ansible.builtin.shell
- module: ansible.windows.win_command
- module: ansible.windows.win_shell
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
- name: Bootstrap a host without python2 installed
ansible.builtin.raw: dnf install -y python2 python2-dnf libselinux-python
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
ansible.builtin.raw: cat < /tmp/*txt
args:
executable: /bin/bash
- name: Safely use templated variables. Always use quote filter to avoid injection issues.
ansible.builtin.raw: "{{ package_mgr|quote }} {{ pkg_flags|quote }} install {{ python|quote }}"
- name: List user accounts on a Windows system
ansible.builtin.raw: Get-WmiObject -Class Win32_UserAccount
"""
| 3,741 | Python | .py | 81 | 41.703704 | 145 | 0.736035 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,148 | fetch.py | ansible_ansible/lib/ansible/modules/fetch.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import annotations
DOCUMENTATION = r"""
---
module: fetch
short_description: Fetch files from remote nodes
description:
- This module works like M(ansible.builtin.copy), but in reverse.
- It is used for fetching files from remote machines and storing them locally in a file tree, organized by hostname.
- Files that already exist at O(dest) will be overwritten if they are different than the O(src).
- This module is also supported for Windows targets.
version_added: '0.2'
options:
src:
description:
- The file on the remote system to fetch.
- This I(must) be a file, not a directory.
- Recursive fetching may be supported in a later release.
required: yes
dest:
description:
- A directory to save the file into.
- For example, if O(dest=/backup), then O(src=/etc/profile) on host
C(host.example.com), would save the file into C(/backup/host.example.com/etc/profile).
The host name is based on the inventory name.
required: yes
fail_on_missing:
version_added: '1.1'
description:
- When set to V(true), the task will fail if the remote file cannot be read for any reason.
- Prior to Ansible 2.5, setting this would only fail if the source file was missing.
- The default was changed to V(true) in Ansible 2.5.
type: bool
default: yes
validate_checksum:
version_added: '1.4'
description:
- Verify that the source and destination checksums match after the files are fetched.
type: bool
default: yes
flat:
version_added: '1.2'
description:
- Allows you to override the default behavior of appending hostname/path/to/file to the destination.
- If O(dest) ends with '/', it will use the basename of the source file, similar to the copy module.
- This can be useful if working with a single host, or if retrieving files that are uniquely named per host.
- If using multiple hosts with the same filename, the file will be overwritten for each host.
type: bool
default: no
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.files
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix, windows
safe_file_operations:
support: none
vault:
support: none
notes:
- When running fetch with C(become), the M(ansible.builtin.slurp) module will also be
used to fetch the contents of the file for determining the remote
checksum. This effectively doubles the transfer size, and
depending on the file size can consume all available memory on the
remote or local hosts causing a C(MemoryError). Due to this it is
advisable to run this module without C(become) whenever possible.
- Prior to Ansible 2.5 this module would not fail if reading the remote
file was impossible unless O(fail_on_missing) was set.
- In Ansible 2.5 or later, playbook authors are encouraged to use
C(failed_when) or C(ignore_errors) to get this ability. They may
also explicitly set O(fail_on_missing) to V(false) to get the
non-failing behaviour.
seealso:
- module: ansible.builtin.copy
- module: ansible.builtin.slurp
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
- name: Store file into /tmp/fetched/host.example.com/tmp/somefile
ansible.builtin.fetch:
src: /tmp/somefile
dest: /tmp/fetched
- name: Specifying a path directly
ansible.builtin.fetch:
src: /tmp/somefile
dest: /tmp/prefix-{{ inventory_hostname }}
flat: yes
- name: Specifying a destination path
ansible.builtin.fetch:
src: /tmp/uniquefile
dest: /tmp/special/
flat: yes
- name: Storing in a path relative to the playbook
ansible.builtin.fetch:
src: /tmp/uniquefile
dest: special/prefix-{{ inventory_hostname }}
flat: yes
"""
| 4,191 | Python | .py | 114 | 33.333333 | 116 | 0.736726 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,149 | group_by.py | ansible_ansible/lib/ansible/modules/group_by.py |
# -*- mode: python -*-
# Copyright: (c) 2012, Jeroen Hoekx (@jhoekx)
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: group_by
short_description: Create Ansible groups based on facts
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as groups.
type: str
required: true
parents:
description:
- The list of the parent groups.
type: list
elements: str
default: all
version_added: "2.4"
attributes:
action:
support: full
become:
support: none
bypass_host_loop:
support: none
bypass_task_loop:
support: none
check_mode:
details: While this makes no changes to target systems, the 'in memory' inventory will still be altered
support: partial
core:
details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
support: partial
connection:
support: none
delegation:
support: none
diff_mode:
support: none
platform:
platforms: all
notes:
- Spaces in group names are converted to dashes '-'.
- Though this module does not change the remote host,
we do provide 'changed' status as it can be useful
for those trying to track inventory changes.
seealso:
- module: ansible.builtin.add_host
author:
- Jeroen Hoekx (@jhoekx)
"""
EXAMPLES = r"""
- name: Create groups based on the machine architecture
ansible.builtin.group_by:
key: machine_{{ ansible_machine }}
- name: Create groups like 'virt_kvm_host'
ansible.builtin.group_by:
key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
- name: Create nested groups
ansible.builtin.group_by:
key: el{{ ansible_distribution_major_version }}-{{ ansible_architecture }}
parents:
- el{{ ansible_distribution_major_version }}
- name: Add all active hosts to a static group
ansible.builtin.group_by:
key: done
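# A hypothetical variation (not in the upstream docs): grouping by
# distribution under a shared parent group; group names are illustrative
- name: Group hosts by distribution under a common parent
  ansible.builtin.group_by:
    key: os_{{ ansible_distribution }}
    parents:
      - os_all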
"""
| 2,416 | Python | .py | 80 | 26.5 | 148 | 0.719502 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,150 | pause.py | ansible_ansible/lib/ansible/modules/pause.py |
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: pause
short_description: Pause playbook execution
description:
- Pauses playbook execution for a set amount of time, or until a prompt is acknowledged.
All parameters are optional. The default behavior is to pause with a prompt.
- To pause/wait/sleep per host, use the M(ansible.builtin.wait_for) module.
- You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely.
To continue early press C(ctrl+c) and then C(c). To abort a playbook press C(ctrl+c) and then C(a).
- Prompting combined with a set pause duration is not supported; a timed pause can be interrupted, but it does not return user input.
- The pause module integrates into async/parallelized playbooks without any special considerations (see Rolling Updates).
When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts.
- This module is also supported for Windows targets.
version_added: "0.8"
options:
minutes:
description:
- A positive number of minutes to pause for.
seconds:
description:
- A positive number of seconds to pause for.
prompt:
description:
- Optional text to use for the prompt message.
- User input is only returned if O(seconds) and O(minutes) are both not specified,
otherwise this is just a custom message before playbook execution is paused.
echo:
description:
- Controls whether or not keyboard input is shown when typing.
- Only has effect if neither O(seconds) nor O(minutes) are set.
type: bool
default: 'yes'
version_added: 2.5
author: "Tim Bielawa (@tbielawa)"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
become:
support: none
bypass_host_loop:
support: full
check_mode:
support: full
connection:
support: none
delegation:
support: none
diff_mode:
support: none
platform:
platforms: all
notes:
- Starting in 2.2, if you specify 0 or a negative value for minutes or seconds, it will wait for 1 second; previously it would wait indefinitely.
- User input is not captured or echoed, regardless of echo setting, when minutes or seconds is specified.
"""
EXAMPLES = """
- name: Pause for 5 minutes to build app cache
ansible.builtin.pause:
minutes: 5
- name: Pause until you can verify updates to an application were successful
ansible.builtin.pause:
- name: A helpful reminder of what to look out for post-update
ansible.builtin.pause:
prompt: "Make sure org.foo.FooOverload exception is not present"
- name: Pause to get some sensitive input
ansible.builtin.pause:
prompt: "Enter a secret"
echo: no
"""
RETURN = """
user_input:
description: User input from interactive console
returned: if no waiting time set
type: str
sample: Example user input
start:
description: Time when started pausing
returned: always
type: str
sample: "2017-02-23 14:35:07.298862"
stop:
description: Time when ended pausing
returned: always
type: str
sample: "2017-02-23 14:35:09.552594"
delta:
description: Time paused in seconds
returned: always
type: str
sample: 2
stdout:
description: Output of pause module
returned: always
type: str
sample: Paused for 0.04 minutes
echo:
description: Value of echo setting
returned: always
type: bool
sample: true
"""
| 3,796 | Python | .py | 111 | 30.405405 | 143 | 0.732391 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,151 | apt_key.py | ansible_ansible/lib/ansible/modules/apt_key.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: apt_key
author:
- Jayson Vantuyl (@jvantuyl)
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it.
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: debian
notes:
- The C(apt-key) command used by this module has been deprecated. See the L(Debian wiki,https://wiki.debian.org/DebianRepository/UseThirdParty) for details.
This module is kept for backwards compatibility for systems that still use C(apt-key) as the main way to manage apt repository keys.
- As a sanity check, the downloaded key id must match the one specified.
- "Use full fingerprint (40 characters) key ids to avoid key collisions.
To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
- If you specify both the key O(id) and the O(url) with O(state=present), the task can verify or add the key as needed.
- Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's C(update_cache) option).
requirements:
- gpg
seealso:
- module: ansible.builtin.deb822_repository
options:
id:
description:
- The identifier of the key.
- Including this allows check mode to correctly report the changed state.
- If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
- This parameter is required when O(state) is set to V(absent).
type: str
data:
description:
- The keyfile contents to add to the keyring.
type: str
file:
description:
- The path to a keyfile on the remote server to add to the keyring.
type: path
keyring:
description:
- The full path to specific keyring file in C(/etc/apt/trusted.gpg.d/).
type: path
version_added: "1.3"
url:
description:
- The URL to retrieve key from.
type: str
keyserver:
description:
- The keyserver to retrieve key from.
type: str
version_added: "1.6"
state:
description:
- Ensures that the key is present (added) or absent (revoked).
type: str
choices: [ absent, present ]
default: present
validate_certs:
description:
- If V(false), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
"""
EXAMPLES = """
- name: One way to avoid apt_key once it is removed from your distro; armored keys should use the .asc extension, binary keys should use .gpg
block:
- name: somerepo | no apt key
ansible.builtin.get_url:
url: https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x36a1d7869245c8950f966e92d8576a8ba88d21e9
dest: /etc/apt/keyrings/myrepo.asc
checksum: sha256:bb42f0db45d46bab5f9ec619e1a47360b94c27142e57aa71f7050d08672309e0
- name: somerepo | apt source
ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/myrepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
state: present
- name: Add an apt key by id from a keyserver
ansible.builtin.apt_key:
keyserver: keyserver.ubuntu.com
id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
- name: Add an Apt signing key, uses whichever key is at the URL
ansible.builtin.apt_key:
url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
state: present
- name: Add an Apt signing key, will not download if present
ansible.builtin.apt_key:
id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
state: present
- name: Remove an Apt-specific signing key, leading 0x is valid
ansible.builtin.apt_key:
id: 0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA
state: absent
# Use armored file since utf-8 string is expected. Must be of "PGP PUBLIC KEY BLOCK" type.
- name: Add a key from a file on the Ansible server
ansible.builtin.apt_key:
data: "{{ lookup('ansible.builtin.file', 'apt.asc') }}"
state: present
- name: Add an Apt signing key to a specific keyring file
ansible.builtin.apt_key:
id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
keyring: /etc/apt/trusted.gpg.d/debian.gpg
- name: Add Apt signing key on remote server to keyring
ansible.builtin.apt_key:
id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
file: /tmp/apt.gpg
state: present
"""
RETURN = """
after:
description: List of apt key ids or fingerprints after any modification
returned: on change
type: list
sample: ["D8576A8BA88D21E9", "3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
before:
description: List of apt key ids or fingerprints before any modifications
returned: always
type: list
sample: ["3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
fp:
description: Fingerprint of the key to import
returned: always
type: str
sample: "D8576A8BA88D21E9"
id:
description: key id from source
returned: always
type: str
sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
key_id:
description: calculated key id; it should be the same as 'id', but can differ
returned: always
type: str
sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
short_id:
description: calculated short key id
returned: always
type: str
sample: "A88D21E9"
"""
import os
# FIXME: standardize into module_common
from traceback import format_exc
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.urls import fetch_url
apt_key_bin = None
gpg_bin = None
locale = None
def lang_env(module):
if not hasattr(lang_env, 'result'):
locale = get_best_parsable_locale(module)
lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale)
return lang_env.result
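# Illustrative note (not part of the upstream module): the locale dict is
# memoized on the function object, so only the first call pays for the
# locale lookup, e.g.:
#   lang_env(module)  # runs get_best_parsable_locale() once
#   lang_env(module)  # returns the cached dict unchanged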
def find_needed_binaries(module):
global apt_key_bin
global gpg_bin
apt_key_bin = module.get_bin_path('apt-key', required=True)
gpg_bin = module.get_bin_path('gpg', required=True)
def add_http_proxy(cmd):
for envvar in ('HTTPS_PROXY', 'https_proxy', 'HTTP_PROXY', 'http_proxy'):
proxy = os.environ.get(envvar)
if proxy:
break
if proxy:
cmd += ' --keyserver-options http-proxy=%s' % proxy
return cmd
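# Illustrative sketch (not part of the upstream module): with, say,
# HTTPS_PROXY=http://proxy.example:3128 exported, add_http_proxy() appends
# ' --keyserver-options http-proxy=http://proxy.example:3128' to the command.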
def parse_key_id(key_id):
"""validate the key_id and break it into segments
:arg key_id: The key_id as supplied by the user. A valid key_id will be
8, 16, or more hexadecimal chars with an optional leading ``0x``.
:returns: The portion of key_id suitable for apt-key del, the portion
suitable for comparisons with --list-public-keys, and the portion that
can be used with --recv-key. If key_id is long enough, these will be
the last 8 characters of key_id, the last 16 characters, and all of
key_id. If key_id is not long enough, some of the values will be the
same.
* apt-key del <= 1.10 has a bug with key_id != 8 chars
* apt-key adv --list-public-keys prints 16 chars
* apt-key adv --recv-key can take more chars
"""
# Make sure the key_id is valid hexadecimal
int(to_native(key_id), 16)
key_id = key_id.upper()
if key_id.startswith('0X'):
key_id = key_id[2:]
key_id_len = len(key_id)
if key_id_len not in (8, 16) and key_id_len <= 16:
raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')
short_key_id = key_id[-8:]
fingerprint = key_id
if key_id_len > 16:
fingerprint = key_id[-16:]
return short_key_id, fingerprint, key_id
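# Illustrative sketch (not part of the upstream module): for a full
# 40-character fingerprint the three return values are progressively longer
# slices of the input, e.g.:
#   parse_key_id('0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA')
#   -> ('473041FA', 'AED4B06F473041FA', '9FED2BCBDCD29CDF762678CBAED4B06F473041FA')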
def parse_output_for_keys(output, short_format=False):
found = []
lines = to_native(output).split('\n')
for line in lines:
if (line.startswith("pub") or line.startswith("sub")) and "expired" not in line:
try:
# apt key format
tokens = line.split()
code = tokens[1]
(len_type, real_code) = code.split("/")
except (IndexError, ValueError):
# gpg format
try:
tokens = line.split(':')
real_code = tokens[4]
except (IndexError, ValueError):
# invalid line, skip
continue
found.append(real_code)
if found and short_format:
found = shorten_key_ids(found)
return found
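# Illustrative sketch (not part of the upstream module): the two line shapes
# this parser accepts, with the id each one yields:
#   apt-key style:           'pub   rsa4096/A88D21E9 2012-05-01 [SC]' -> 'A88D21E9'
#   gpg --with-colons style: 'pub:-:4096:1:D8576A8BA88D21E9:...'      -> 'D8576A8BA88D21E9'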
def all_keys(module, keyring, short_format):
if keyring is not None:
cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
else:
cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
(rc, out, err) = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="Unable to list public keys", cmd=cmd, rc=rc, stdout=out, stderr=err)
return parse_output_for_keys(out, short_format)
def shorten_key_ids(key_id_list):
"""
Takes a list of key ids, and converts them to the 'short' format,
by reducing them to their last 8 characters.
"""
short = []
for key in key_id_list:
short.append(key[-8:])
return short
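# Illustrative sketch (not part of the upstream module):
#   shorten_key_ids(['36A1D7869245C8950F966E92D8576A8BA88D21E9']) -> ['A88D21E9']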
def download_key(module, url):
try:
# note: validate_certs and other args are pulled from module directly
rsp, info = fetch_url(module, url, use_proxy=True)
if info['status'] != 200:
module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
return rsp.read()
except Exception:
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def get_key_id_from_file(module, filename, data=None):
native_data = to_native(data)
is_armored = native_data.find("-----BEGIN PGP PUBLIC KEY BLOCK-----") >= 0
key = None
cmd = [gpg_bin, '--with-colons', filename]
(rc, out, err) = module.run_command(cmd, environ_update=lang_env(module), data=(native_data if is_armored else data), binary_data=not is_armored)
if rc != 0:
module.fail_json(msg="Unable to extract key from '%s'" % ('inline data' if data is not None else filename), stdout=out, stderr=err)
keys = parse_output_for_keys(out)
# assume we only want first key?
if keys:
key = keys[0]
return key
def get_key_id_from_data(module, data):
return get_key_id_from_file(module, '-', data)
def import_key(module, keyring, keyserver, key_id):
if keyring:
cmd = "%s --keyring %s adv --no-tty --keyserver %s" % (apt_key_bin, keyring, keyserver)
else:
cmd = "%s adv --no-tty --keyserver %s" % (apt_key_bin, keyserver)
# check for proxy
cmd = add_http_proxy(cmd)
# add recv argument as last one
cmd = "%s --recv %s" % (cmd, key_id)
for retry in range(5):
(rc, out, err) = module.run_command(cmd, environ_update=lang_env(module))
if rc == 0:
break
else:
# Out of retries
if rc == 2 and 'not found on keyserver' in out:
msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
module.fail_json(cmd=cmd, msg=msg, forced_environment=lang_env(module))
else:
msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
module.fail_json(cmd=cmd, msg=msg, forced_environment=lang_env(module), rc=rc, stdout=out, stderr=err)
return True
def add_key(module, keyfile, keyring, data=None):
if data is not None:
if keyring:
cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
else:
cmd = "%s add -" % apt_key_bin
(rc, out, err) = module.run_command(cmd, data=data, binary_data=True)
if rc != 0:
module.fail_json(
msg="Unable to add a key from binary data",
cmd=cmd,
rc=rc,
stdout=out,
stderr=err,
)
else:
if keyring:
cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
else:
cmd = "%s add %s" % (apt_key_bin, keyfile)
(rc, out, err) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg="Unable to add a key from file %s" % (keyfile),
cmd=cmd,
rc=rc,
keyfile=keyfile,
stdout=out,
stderr=err,
)
return True
def remove_key(module, key_id, keyring):
if keyring:
cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
else:
cmd = '%s del %s' % (apt_key_bin, key_id)
(rc, out, err) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg="Unable to remove a key with id %s" % (key_id),
cmd=cmd,
rc=rc,
key_id=key_id,
stdout=out,
stderr=err,
)
return True
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(type='str'),
url=dict(type='str'),
data=dict(type='str'),
file=dict(type='path'),
keyring=dict(type='path'),
validate_certs=dict(type='bool', default=True),
keyserver=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
supports_check_mode=True,
mutually_exclusive=(('data', 'file', 'keyserver', 'url'),),
)
# parameters
key_id = module.params['id']
url = module.params['url']
data = module.params['data']
filename = module.params['file']
keyring = module.params['keyring']
state = module.params['state']
keyserver = module.params['keyserver']
# internal vars
short_format = False
short_key_id = None
fingerprint = None
error_no_error = "apt-key did not return an error, but %s (check that the id is correct and *not* a subkey)"
# ensure we have requirements met
find_needed_binaries(module)
# initialize result dict
r = {'changed': False}
if not key_id:
if keyserver:
module.fail_json(msg="Missing key_id, required with keyserver.")
if url:
data = download_key(module, url)
if filename:
key_id = get_key_id_from_file(module, filename)
elif data:
key_id = get_key_id_from_data(module, data)
r['id'] = key_id
try:
short_key_id, fingerprint, key_id = parse_key_id(key_id)
r['short_id'] = short_key_id
r['fp'] = fingerprint
r['key_id'] = key_id
except ValueError:
module.fail_json(msg='Invalid key_id', **r)
if not fingerprint:
# invalid key should fail well before this point, but JIC ...
module.fail_json(msg="Unable to continue as we could not extract a valid fingerprint to compare against existing keys.", **r)
if len(key_id) == 8:
short_format = True
# get existing keys to verify if we need to change
r['before'] = keys = all_keys(module, keyring, short_format)
keys2 = []
if state == 'present':
if (short_format and short_key_id not in keys) or (not short_format and fingerprint not in keys):
r['changed'] = True
if not module.check_mode:
if filename:
add_key(module, filename, keyring)
elif keyserver:
import_key(module, keyring, keyserver, key_id)
elif data:
# this also takes care of url if key_id was not provided
add_key(module, "-", keyring, data)
elif url:
# we hit this branch only if key_id is supplied with url
data = download_key(module, url)
add_key(module, "-", keyring, data)
else:
module.fail_json(msg="No key to add ... how did i get here?!?!", **r)
# verify it got added
r['after'] = keys2 = all_keys(module, keyring, short_format)
if (short_format and short_key_id not in keys2) or (not short_format and fingerprint not in keys2):
module.fail_json(msg=error_no_error % 'failed to add the key', **r)
elif state == 'absent':
if not key_id:
module.fail_json(msg="key is required to remove a key", **r)
if fingerprint in keys:
r['changed'] = True
if not module.check_mode:
# we use the "short" id: key_id[-8:], short_format=True
# it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
if short_key_id is not None and remove_key(module, short_key_id, keyring):
r['after'] = keys2 = all_keys(module, keyring, short_format)
if fingerprint in keys2:
module.fail_json(msg=error_no_error % 'the key was not removed', **r)
else:
module.fail_json(msg="error removing key_id", **r)
module.exit_json(**r)
if __name__ == '__main__':
main()
| 18,125 | Python | .py | 437 | 33.576659 | 160 | 0.627465 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,152 | file.py | ansible_ansible/lib/ansible/modules/file.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: file
version_added: historical
short_description: Manage files and file properties
extends_documentation_fragment: [files, action_common_attributes]
description:
- Set attributes of files, directories, or symlinks and their targets.
- Alternatively, remove files, symlinks or directories.
- Many other modules support the same options as the M(ansible.builtin.file) module - including M(ansible.builtin.copy),
M(ansible.builtin.template), and M(ansible.builtin.assemble).
- For Windows targets, use the M(ansible.windows.win_file) module instead.
options:
path:
description:
- Path to the file being managed.
type: path
required: yes
aliases: [ dest, name ]
state:
description:
- If V(absent), directories will be recursively deleted, and files or symlinks will
be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
under C(path_content). Note that V(absent) will not cause M(ansible.builtin.file) to fail if the O(path) does
not exist as the state did not change.
- If V(directory), all intermediate subdirectories will be created if they
do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
- If V(file), with no other options, returns the current state of C(path).
- If V(file), even with other options (such as O(mode)), the file will be modified if it exists but will NOT be created if it does not exist.
Set to V(touch) or use the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want to create the file if it does not exist.
- If V(hard), the hard link will be created or changed.
- If V(link), the symbolic link will be created or changed.
- If V(touch) (new in 1.4), an empty file will be created if the file does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way V(touch) works from the command line).
- Default is the current state of the file if it exists, V(directory) if O(recurse=yes), or V(file) otherwise.
type: str
choices: [ absent, directory, file, hard, link, touch ]
src:
description:
- Path of the file to link to.
- This applies only to O(state=link) and O(state=hard).
- For O(state=link), this will also accept a non-existing path.
- Relative paths are relative to the file being created (O(path)) which is how
the Unix command C(ln -s SRC DEST) treats relative paths.
type: path
recurse:
description:
- Recursively set the specified file attributes on directory contents.
- This applies only when O(state) is set to V(directory).
type: bool
default: no
version_added: '1.1'
force:
description:
- >
Force the creation of the links in two cases: if the link type is symbolic and the source file does
not exist (but will appear later), or if the destination exists and is a file (so we need to unlink the
O(path) file and create a link to the O(src) file in its place).
type: bool
default: no
follow:
description:
- This flag indicates that filesystem links, if they exist, should be followed.
- O(follow=yes) and O(state=link) can modify O(src) when combined with parameters such as O(mode).
- Prior to Ansible 2.5, this was V(false) by default.
- While creating a symlink with a non-existent destination, set O(follow=false) to avoid a warning message related to permission issues.
The warning message is added to notify the user that we can not set permissions to the non-existent destination.
type: bool
default: yes
version_added: '1.8'
modification_time:
description:
- This parameter indicates the time the file's modification time should be set to.
- Should be V(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or V(now).
- Default is V(None), meaning that V(preserve) is the default for O(state=[file,directory,link,hard]) and V(now) is default for O(state=touch).
type: str
version_added: "2.7"
modification_time_format:
description:
- When used with O(modification_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
access_time:
description:
- This parameter indicates the time the file's access time should be set to.
- Should be V(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or V(now).
- Default is V(None), meaning that V(preserve) is the default for O(state=[file,directory,link,hard]) and V(now) is default for O(state=touch).
type: str
version_added: '2.7'
access_time_format:
description:
- When used with O(access_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
seealso:
- module: ansible.builtin.assemble
- module: ansible.builtin.copy
- module: ansible.builtin.stat
- module: ansible.builtin.template
- module: ansible.windows.win_file
attributes:
check_mode:
support: full
diff_mode:
details: permissions and ownership will be shown but file contents on absent/touch will not.
support: partial
platform:
platforms: posix
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
- name: Change file ownership, group and permissions
ansible.builtin.file:
path: /etc/foo.conf
owner: foo
group: foo
mode: '0644'
- name: Give insecure permissions to an existing file
ansible.builtin.file:
path: /work
owner: root
group: root
mode: '1777'
- name: Create a symbolic link
ansible.builtin.file:
src: /file/to/link/to
dest: /path/to/symlink
owner: foo
group: foo
state: link
- name: Create two hard links
ansible.builtin.file:
src: '/tmp/{{ item.src }}'
dest: '{{ item.dest }}'
state: hard
loop:
- { src: x, dest: y }
- { src: z, dest: k }
- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u=rw,g=r,o=r
- name: Touch the same file, but add/remove some permissions
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
- name: Touch the same file again, but do not change times; this makes the task idempotent
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
modification_time: preserve
access_time: preserve
- name: Create a directory if it does not exist
ansible.builtin.file:
path: /etc/some_directory
state: directory
mode: '0755'
- name: Update modification and access time of given file
ansible.builtin.file:
path: /etc/some_file
state: file
modification_time: now
access_time: now
- name: Set access time based on seconds from epoch value
ansible.builtin.file:
path: /etc/another_file
state: file
access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
- name: Recursively change ownership of a directory
ansible.builtin.file:
path: /etc/foo
state: directory
recurse: yes
owner: foo
group: foo
- name: Remove file (delete file)
ansible.builtin.file:
path: /etc/foo.txt
state: absent
- name: Recursively remove directory
ansible.builtin.file:
path: /etc/foo
state: absent
"""
RETURN = r"""
dest:
description: Destination file/path, equal to the value passed to O(path).
returned: O(state=touch), O(state=hard), O(state=link)
type: str
sample: /path/to/file.txt
path:
description: Destination file/path, equal to the value passed to O(path).
returned: O(state=absent), O(state=directory), O(state=file)
type: str
sample: /path/to/file.txt
"""
import errno
import os
import shutil
import time
from pwd import getpwnam, getpwuid
from grp import getgrnam, getgrgid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.common.sentinel import Sentinel
# There will only be a single AnsibleModule object per module
module = None
def additional_parameter_handling(module):
"""Additional parameter validation and reformatting"""
# When path is a directory, rewrite the pathname to be the file inside of the directory
# TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
# I think this is where we want to be in the future:
# when isdir(path):
# if state == absent: Remove the directory
# if state == touch: Touch the directory
# if state == directory: Assert the directory is the same as the one specified
# if state == file: place inside of the directory (use _original_basename)
# if state == link: place inside of the directory (use _original_basename. Fallback to src?)
# if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
params = module.params
if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
basename = None
if params['_original_basename']:
basename = params['_original_basename']
elif params['src']:
basename = os.path.basename(params['src'])
if basename:
params['path'] = os.path.join(params['path'], basename)
# state should default to file, but since that creates many conflicts,
# default state to 'current' when it exists.
prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))
if params['state'] is None:
if prev_state != 'absent':
params['state'] = prev_state
elif params['recurse']:
params['state'] = 'directory'
else:
params['state'] = 'file'
# make sure the target path is a directory when we're doing a recursive operation
if params['recurse'] and params['state'] != 'directory':
module.fail_json(
msg="recurse option requires state to be 'directory'",
path=params["path"]
)
# Fail if 'src' but no 'state' is specified
if params['src'] and params['state'] not in ('link', 'hard'):
module.fail_json(
msg="src option requires state to be 'link' or 'hard'",
path=params['path']
)
def get_state(path):
""" Find out current state """
b_path = to_bytes(path, errors='surrogate_or_strict')
try:
if os.path.lexists(b_path):
if os.path.islink(b_path):
return 'link'
elif os.path.isdir(b_path):
return 'directory'
elif os.stat(b_path).st_nlink > 1:
return 'hard'
# could be many other things, but defaulting to file
return 'file'
return 'absent'
except OSError as e:
if e.errno == errno.ENOENT: # It may already have been removed
return 'absent'
else:
raise
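# Illustrative sketch (not part of the upstream module): typical mappings,
# using hypothetical paths; lexists/islink are checked first, so a broken
# symlink still reports 'link':
#   /etc          -> 'directory'
#   /etc/hosts    -> 'file' (or 'hard' when st_nlink > 1)
#   /tmp/dangling -> 'link'
#   /nonexistent  -> 'absent'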
# This should be moved into the common file utilities
def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
changed = False
try:
for b_root, b_dirs, b_files in os.walk(b_path):
for b_fsobj in b_dirs + b_files:
b_fsname = os.path.join(b_root, b_fsobj)
if not os.path.islink(b_fsname):
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
else:
# Change perms on the link
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
if follow:
b_fsname = os.path.join(b_root, os.readlink(b_fsname))
# The link target could be nonexistent
if os.path.exists(b_fsname):
if os.path.isdir(b_fsname):
# Link is a directory so change perms on the directory's contents
changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)
# Change perms on the file pointed to by the link
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
except RuntimeError as e:
# on Python3 "RecursionError" is raised which is derived from "RuntimeError"
# TODO once this function is moved into the common file utilities, this should probably raise more general exception
module.fail_json(
msg=f"Could not recursively set attributes on {to_native(b_path)}. Original error was: '{to_native(e)}'"
)
return changed
def initial_diff(path, state, prev_state):
diff = {'before': {'path': path},
'after': {'path': path},
}
if prev_state != state:
diff['before']['state'] = prev_state
diff['after']['state'] = state
if state == 'absent' and prev_state == 'directory':
walklist = {
'directories': [],
'files': [],
}
b_path = to_bytes(path, errors='surrogate_or_strict')
for base_path, sub_folders, files in os.walk(b_path):
for folder in sub_folders:
folderpath = os.path.join(base_path, folder)
walklist['directories'].append(folderpath)
for filename in files:
filepath = os.path.join(base_path, filename)
walklist['files'].append(filepath)
diff['before']['path_content'] = walklist
return diff
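# Illustrative sketch (not part of the upstream module): the diff dict shape
# consumed by --diff output, e.g. when deleting a regular file:
#   {'before': {'path': '/tmp/x', 'state': 'file'},
#    'after': {'path': '/tmp/x', 'state': 'absent'}}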
#
# States
#
def get_timestamp_for_time(formatted_time, time_format):
if formatted_time == 'preserve':
return None
if formatted_time == 'now':
return Sentinel
try:
struct = time.strptime(formatted_time, time_format)
struct_time = time.mktime(struct)
except (ValueError, OverflowError) as e:
module.fail_json(
msg=f"Error while obtaining timestamp for time {formatted_time} using format {time_format}: {to_native(e, nonstring='simplerepr')}",
)
return struct_time
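# Illustrative sketch (not part of the upstream module): the three return
# shapes map onto the documented parameter values:
#   get_timestamp_for_time('preserve', '%Y%m%d%H%M.%S')        -> None
#   get_timestamp_for_time('now', '%Y%m%d%H%M.%S')             -> Sentinel
#   get_timestamp_for_time('202401311200.00', '%Y%m%d%H%M.%S') -> epoch seconds (float)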
def update_timestamp_for_file(path, mtime, atime, diff=None):
b_path = to_bytes(path, errors='surrogate_or_strict')
try:
# When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
# https://github.com/ansible/ansible/issues/50943
if mtime is Sentinel and atime is Sentinel:
# It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
# not be updated. Just use the current time for the diff values
mtime = atime = time.time()
previous_mtime = os.stat(b_path).st_mtime
previous_atime = os.stat(b_path).st_atime
set_time = None
else:
# If both parameters are None ('preserve'), there is nothing to do
if mtime is None and atime is None:
return False
previous_mtime = os.stat(b_path).st_mtime
previous_atime = os.stat(b_path).st_atime
if mtime is None:
mtime = previous_mtime
elif mtime is Sentinel:
mtime = time.time()
if atime is None:
atime = previous_atime
elif atime is Sentinel:
atime = time.time()
# If both timestamps are already ok, nothing to do
if mtime == previous_mtime and atime == previous_atime:
return False
set_time = (atime, mtime)
if not module.check_mode:
os.utime(b_path, set_time)
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
if 'after' not in diff:
diff['after'] = {}
if mtime != previous_mtime:
diff['before']['mtime'] = previous_mtime
diff['after']['mtime'] = mtime
if atime != previous_atime:
diff['before']['atime'] = previous_atime
diff['after']['atime'] = atime
except OSError as e:
module.fail_json(
msg=f"Error while updating modification or access time: {to_native(e, nonstring='simplerepr')}",
path=path
)
return True
def keep_backward_compatibility_on_timestamps(parameter, state):
if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
return 'preserve'
if state == 'touch' and parameter is None:
return 'now'
return parameter
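# Illustrative sketch (not part of the upstream module):
#   keep_backward_compatibility_on_timestamps(None, 'file')  -> 'preserve'
#   keep_backward_compatibility_on_timestamps(None, 'touch') -> 'now'
#   keep_backward_compatibility_on_timestamps('now', 'file') -> 'now' (explicit values win)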
def execute_diff_peek(path):
"""Take a guess as to whether a file is a binary file"""
b_path = to_bytes(path, errors='surrogate_or_strict')
appears_binary = False
try:
with open(b_path, 'rb') as f:
head = f.read(8192)
except Exception:
# If we can't read the file, we're okay assuming it's text
pass
else:
if b"\x00" in head:
appears_binary = True
return appears_binary
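# Illustrative sketch (not part of the upstream module): a NUL byte anywhere
# in the first 8 KiB marks the file as binary:
#   b'#!/bin/sh\necho hi\n'        -> False (text)
#   b'\x7fELF\x02\x01\x01\x00...'  -> True (binary)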
def ensure_absent(path):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
result = {}
if prev_state != 'absent':
diff = initial_diff(path, 'absent', prev_state)
if not module.check_mode:
if prev_state == 'directory':
try:
shutil.rmtree(b_path, ignore_errors=False)
except Exception as e:
module.fail_json(
msg=f"rmtree failed: {to_native(e)}"
)
else:
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
module.fail_json(
msg=f"unlinking failed: {to_native(e)}",
path=path
)
result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
else:
result.update({'path': path, 'changed': False, 'state': 'absent'})
return result
def execute_touch(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
changed = False
result = {'dest': path}
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# If the file did not already exist
if prev_state == 'absent':
# if we are in check mode and the file is absent
# we can set the changed status to True and return
if module.check_mode:
result['changed'] = True
return result
# Create an empty file
try:
open(b_path, 'wb').close()
changed = True
except OSError as e:  # IOError is an alias of OSError on Python 3
module.fail_json(
msg=f"Error, could not touch target: {to_native(e, nonstring='simplerepr')}",
path=path
)
# Update the attributes on the file
diff = initial_diff(path, 'touch', prev_state)
file_args = module.load_file_common_arguments(module.params)
try:
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except SystemExit as e:
if e.code: # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
# We take this to mean that fail_json() was called from
# somewhere in basic.py
if prev_state == 'absent':
# If we just created the file we can safely remove it
os.remove(b_path)
raise
result['changed'] = changed
result['diff'] = diff
return result
def ensure_file_attributes(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
if prev_state != 'file':
if follow and prev_state == 'link':
# follow symlink and operate on original
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
prev_state = get_state(b_path)
file_args['path'] = path
if prev_state not in ('file', 'hard'):
# file is not absent and any other state is a conflict
module.fail_json(
msg=f"file ({path}) is {prev_state}, cannot continue",
path=path,
state=prev_state
)
diff = initial_diff(path, 'file', prev_state)
changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'path': path, 'changed': changed, 'diff': diff}
def ensure_directory(path, follow, recurse, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# For followed symlinks, we need to operate on the target of the link
if follow and prev_state == 'link':
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
file_args['path'] = path
prev_state = get_state(b_path)
changed = False
diff = initial_diff(path, 'directory', prev_state)
if prev_state == 'absent':
# Create directory and assign permissions to it
if module.check_mode:
return {'path': path, 'changed': True, 'diff': diff}
curpath = ''
try:
# Split the path so we can apply filesystem attributes recursively
# from the root (/) directory for absolute paths or the base path
# of a relative path. We can then walk the appropriate directory
# path to apply attributes.
# Something like mkdir -p with mode applied to all of the newly created directories
for dirname in path.strip('/').split('/'):
curpath = '/'.join([curpath, dirname])
# Remove leading slash if we're creating a relative path
if not os.path.isabs(path):
curpath = curpath.lstrip('/')
b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
if not os.path.exists(b_curpath):
try:
os.mkdir(b_curpath)
changed = True
except OSError as ex:
# Possibly something else created the dir since the os.path.exists
# check above. As long as it's a dir, we don't need to error out.
if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
raise
tmp_file_args = file_args.copy()
tmp_file_args['path'] = curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except Exception as e:
module.fail_json(
msg=f"There was an issue creating {curpath} as requested: {to_native(e)}",
path=path
)
return {'path': path, 'changed': changed, 'diff': diff}
elif prev_state != 'directory':
# We already know prev_state is not 'absent', therefore it exists in some form.
module.fail_json(
msg=f"{path} already exists as a {prev_state}",
path=path
)
#
# previous state == directory
#
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
if recurse:
changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
return {'path': path, 'changed': changed, 'diff': diff}
def ensure_symlink(path, src, follow, force, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
prev_state = get_state(b_path)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# src is either the source of a symlink or an informational pass-through of src from the template
# or copy modules; even when this module never uses it, it is needed to key off some behaviors
if src is None:
if follow and os.path.exists(b_path):
# use the current target of the link as the source
src = to_native(os.readlink(b_path), errors='strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
if not os.path.islink(b_path) and os.path.isdir(b_path):
relpath = path
else:
b_relpath = os.path.dirname(b_path)
relpath = to_native(b_relpath, errors='strict')
# If src is None that means we are expecting to update an existing link.
if src is None:
absrc = None
else:
absrc = os.path.join(relpath, src)
b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
if not force and src is not None and not os.path.exists(b_absrc):
module.fail_json(
msg="src file does not exist, use 'force=yes' if you"
f" really want to create the link: {absrc}",
path=path,
src=src
)
if prev_state == 'directory':
if not force:
module.fail_json(
msg=f'refusing to convert from {prev_state} to symlink for {path}',
path=path
)
elif os.listdir(b_path):
# refuse to replace a directory that has files in it
module.fail_json(
msg=f'the directory {path} is not empty, refusing to convert it',
path=path
)
elif prev_state in ('file', 'hard') and not force:
module.fail_json(
msg=f'refusing to convert from {prev_state} to symlink for {path}',
path=path
)
diff = initial_diff(path, 'link', prev_state)
changed = False
if prev_state in ('hard', 'file', 'directory', 'absent'):
if src is None:
module.fail_json(
msg='src is required for creating new symlinks',
)
changed = True
elif prev_state == 'link':
if src is not None:
b_old_src = os.readlink(b_path)
if b_old_src != b_src:
diff['before']['src'] = to_native(b_old_src, errors='strict')
diff['after']['src'] = src
changed = True
else:
module.fail_json(
msg='unexpected position reached',
dest=path,
src=src
)
if changed and not module.check_mode:
if prev_state != 'absent':
# try to replace atomically
b_tmppath = to_bytes(os.path.sep).join(
[os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
)
try:
if prev_state == 'directory':
os.rmdir(b_path)
os.symlink(b_src, b_tmppath)
os.rename(b_tmppath, b_path)
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
module.fail_json(
msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}",
path=path
)
else:
try:
os.symlink(b_src, b_path)
except OSError as e:
module.fail_json(
msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}",
path=path
)
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
# Now that we might have created the symlink, get the arguments.
# We need to do it now so we can properly follow the symlink if needed
# because load_file_common_arguments sets 'path' according
# the value of follow and the symlink existence.
file_args = module.load_file_common_arguments(module.params)
# Whenever we create a link to a nonexistent target we know that the nonexistent target
# cannot have any permissions set on it. Skip setting those and emit a warning (the user
# can set follow=False to remove the warning)
if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
' set to False to avoid this.')
else:
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def ensure_hardlink(path, src, follow, force, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# src is the source of a hardlink. We require it if we are creating a new hardlink.
# We require path in the argument_spec so we know it is present at this point.
if prev_state != 'hard' and src is None:
module.fail_json(
msg='src is required for creating new hardlinks'
)
# Even if the link already exists, if src was specified it needs to exist.
# The inode number will be compared to ensure the link has the correct target.
if src is not None and not os.path.exists(b_src):
module.fail_json(
msg='src does not exist',
dest=path,
src=src
)
diff = initial_diff(path, 'hard', prev_state)
changed = False
if prev_state == 'absent':
changed = True
elif prev_state == 'link':
b_old_src = os.readlink(b_path)
if b_old_src != b_src:
diff['before']['src'] = to_native(b_old_src, errors='strict')
diff['after']['src'] = src
changed = True
elif prev_state == 'hard':
if src is not None and os.stat(b_path).st_ino != os.stat(b_src).st_ino:
changed = True
if not force:
module.fail_json(
msg='Cannot link, different hard link exists at destination',
dest=path,
src=src
)
elif prev_state == 'file':
changed = True
if not force:
module.fail_json(
msg=f'Cannot link, {prev_state} exists at destination',
dest=path,
src=src
)
elif prev_state == 'directory':
changed = True
if os.path.exists(b_path):
if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
return {'path': path, 'changed': False}
elif not force:
module.fail_json(
msg='Cannot link: different hard link exists at destination',
dest=path,
src=src
)
else:
module.fail_json(
msg='unexpected position reached',
dest=path,
src=src
)
if changed and not module.check_mode:
if prev_state != 'absent':
# try to replace atomically
b_tmppath = to_bytes(os.path.sep).join(
[os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
)
try:
if prev_state == 'directory':
if os.path.exists(b_path):
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
raise
os.link(b_src, b_tmppath)
os.rename(b_tmppath, b_path)
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
module.fail_json(
msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}",
path=path
)
else:
try:
if follow and os.path.islink(b_src):
b_src = os.readlink(b_src)
os.link(b_src, b_path)
except OSError as e:
module.fail_json(
msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}",
path=path
)
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def check_owner_exists(module, owner):
try:
uid = int(owner)
try:
getpwuid(uid).pw_name
except KeyError:
module.warn('failed to look up user with uid %s. Create user up to this point in real play' % uid)
except ValueError:
try:
getpwnam(owner).pw_uid
except KeyError:
module.warn('failed to look up user %s. Create user up to this point in real play' % owner)
def check_group_exists(module, group):
try:
gid = int(group)
try:
getgrgid(gid).gr_name
except KeyError:
module.warn('failed to look up group with gid %s. Create group up to this point in real play' % gid)
except ValueError:
try:
getgrnam(group).gr_gid
except KeyError:
module.warn('failed to look up group %s. Create group up to this point in real play' % group)
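# Illustrative note (not part of the upstream module): both check helpers
# accept a numeric id or a name and only emit warnings in check mode, e.g.:
#   check_owner_exists(module, '0')       # uid lookup; root exists, no warning
#   check_owner_exists(module, 'nouser')  # warns that the user cannot be found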
def main():
global module
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
path=dict(type='path', required=True, aliases=['dest', 'name']),
_original_basename=dict(type='str'), # Internal use only, for recursive ops
recurse=dict(type='bool', default=False),
force=dict(type='bool', default=False), # Note: Should not be in file_common_args in future
follow=dict(type='bool', default=True), # Note: Different default than file_common_args
_diff_peek=dict(type='bool'), # Internal use only, for internal checks in the action plugins
src=dict(type='path'), # Note: Should not be in file_common_args in future
modification_time=dict(type='str'),
modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
access_time=dict(type='str'),
access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
),
add_file_common_args=True,
supports_check_mode=True,
)
additional_parameter_handling(module)
params = module.params
state = params['state']
recurse = params['recurse']
force = params['force']
follow = params['follow']
path = params['path']
src = params['src']
if module.check_mode and state != 'absent':
file_args = module.load_file_common_arguments(module.params)
if file_args['owner']:
check_owner_exists(module, file_args['owner'])
if file_args['group']:
check_group_exists(module, file_args['group'])
timestamps = {}
timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
timestamps['modification_time_format'] = params['modification_time_format']
timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
timestamps['access_time_format'] = params['access_time_format']
# short-circuit for diff_peek
if params['_diff_peek'] is not None:
appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
if state == 'file':
result = ensure_file_attributes(path, follow, timestamps)
elif state == 'directory':
result = ensure_directory(path, follow, recurse, timestamps)
elif state == 'link':
result = ensure_symlink(path, src, follow, force, timestamps)
elif state == 'hard':
result = ensure_hardlink(path, src, follow, force, timestamps)
elif state == 'touch':
result = execute_touch(path, follow, timestamps)
elif state == 'absent':
result = ensure_absent(path)
if not module._diff:
result.pop('diff', None)
module.exit_json(**result)
if __name__ == '__main__':
main()
| 39,669 | Python | .py | 882 | 35.60771 | 147 | 0.613448 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,153 | dnf.py | ansible_ansible/lib/ansible/modules/dnf.py |
# -*- coding: utf-8 -*-
# Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
# Copyright 2018 Adam Miller <admiller@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
- Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
options:
use_backend:
description:
- Backend module to use.
default: "auto"
choices:
auto: Automatically select the backend based on the C(ansible_facts.pkg_mgr) fact.
yum: Alias for V(auto) (see Notes)
dnf: M(ansible.builtin.dnf)
yum4: Alias for V(dnf)
dnf4: Alias for V(dnf)
dnf5: M(ansible.builtin.dnf5)
type: str
version_added: 2.15
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
When using state=latest, this can be '*' which means run: dnf -y update.
You can also pass a url or a local path to an rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
- Comparison operators for package version are valid here: C(>), C(<), C(>=), C(<=). Example: C(name >= 1.0).
Spaces around the operator are required.
- You can also pass an absolute path for a binary which is provided by the package to install.
See examples for more information.
aliases:
- pkg
type: list
elements: str
default: []
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks.
Use M(ansible.builtin.package_facts) instead of the O(list) argument as a best practice.
type: str
state:
description:
- Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
      - Default is V(None); in effect the default action is V(present), unless O(autoremove=true)
        is set, in which case V(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- C(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
elements: str
default: []
disablerepo:
description:
- C(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
type: str
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if O(state=present) or O(state=latest).
- This setting affects packages installed from a repository as well as
"local" packages installed from the filesystem or a URL.
type: bool
default: 'no'
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
version_added: "2.3"
default: "/"
type: str
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
version_added: "2.6"
type: str
autoremove:
description:
- If V(true), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when O(state=absent).
type: bool
default: "no"
version_added: "2.4"
exclude:
description:
- Package name(s) to exclude when O(state=present), or latest. This can be a
list or a comma separated string.
version_added: "2.7"
type: list
elements: str
default: []
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
without raising an error. Equivalent to passing the C(--skip-broken) option.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
Has an effect only if O(state=present) or O(state=latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "2.7"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if O(state=present) or O(state=latest).
default: "no"
type: bool
version_added: "2.7"
security:
description:
- If set to V(true), and O(state=latest) then only installs updates that have been marked security related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
type: bool
default: "no"
version_added: "2.7"
bugfix:
description:
- If set to V(true), and O(state=latest) then only installs updates that have been marked bugfix related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
default: "no"
type: bool
version_added: "2.7"
enable_plugin:
description:
- C(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.7"
type: list
elements: str
default: []
disable_plugin:
description:
- C(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.7"
type: list
default: []
elements: str
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to V(all), disables all excludes.
- If set to V(main), disable excludes defined in C([main]) in C(dnf.conf).
- If set to V(repoid), disable excludes defined for given repo id.
version_added: "2.7"
type: str
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. For example, for localinstall.
If set to V(false), the SSL certificates will not be validated.
      - This should only be set to V(false) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
type: bool
default: "yes"
version_added: "2.7"
sslverify:
description:
- Disables SSL validation of the repository server for this transaction.
- This should be set to V(false) if one of the configured repositories is using an untrusted or self-signed certificate.
type: bool
default: "yes"
version_added: "2.13"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
        a possibly already installed higher version of that package.
Note that setting O(allow_downgrade=true) can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.7"
install_repoquery:
description:
      - This is effectively a no-op as it is not needed with DNF.
- This option is deprecated and will be removed in ansible-core 2.20.
type: bool
default: "yes"
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
version_added: "2.7"
lock_timeout:
description:
- Amount of time to wait for the dnf lockfile to be freed.
required: false
default: 30
type: int
version_added: "2.8"
install_weak_deps:
description:
- Will also install all packages linked by a weak dependency relation.
type: bool
default: "yes"
version_added: "2.8"
download_dir:
description:
- Specifies an alternate directory to store packages.
- Has an effect only if O(download_only) is specified.
type: str
version_added: "2.8"
allowerasing:
description:
- If V(true) it allows erasing of installed packages to resolve dependencies.
required: false
type: bool
default: "no"
version_added: "2.10"
nobest:
description:
- This is the opposite of the O(best) option kept for backwards compatibility.
- Since ansible-core 2.17 the default value is set by the operating system distribution.
required: false
type: bool
version_added: "2.11"
best:
description:
- When set to V(true), either use a package with the highest version available or fail.
- When set to V(false), if the latest version cannot be installed go with the lower version.
- Default is set by the operating system distribution.
required: false
type: bool
version_added: "2.17"
cacheonly:
description:
- Tells dnf to run entirely from system cache; does not download or update metadata.
type: bool
default: "no"
version_added: "2.12"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
details: dnf has 2 action plugins that use it under the hood, M(ansible.builtin.dnf) and M(ansible.builtin.package).
support: partial
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: rhel
notes:
  - When used with a C(loop:) each package will be processed individually; it is much more efficient to pass the list directly to the O(name) option.
- Group removal doesn't work if the group was installed with Ansible because
upstream dnf's API doesn't properly mark groups as installed, therefore upon
removal the module is unable to detect that the group is installed
U(https://bugzilla.redhat.com/show_bug.cgi?id=1620324).
- While O(use_backend=yum) and the ability to call the action plugin as
M(ansible.builtin.yum) are provided for syntax compatibility, the YUM
backend was removed in ansible-core 2.17 because the required libraries are
not available for any supported version of Python. If you rely on this
functionality, use an older version of Ansible.
requirements:
- python3-dnf
author:
- Igor Gnatenko (@ignatenkobrain) <i.gnatenko.brain@gmail.com>
- Cristian van Ee (@DJMuggs) <cristian at cvee.org>
- Berend De Schouwer (@berenddeschouwer)
- Adam Miller (@maxamillion) <admiller@redhat.com>
"""
EXAMPLES = """
- name: Install the latest version of Apache
ansible.builtin.dnf:
name: httpd
state: latest
- name: Install Apache >= 2.4
ansible.builtin.dnf:
name: httpd >= 2.4
state: present
- name: Install the latest version of Apache and MariaDB
ansible.builtin.dnf:
name:
- httpd
- mariadb-server
state: latest
- name: Remove the Apache package
ansible.builtin.dnf:
name: httpd
state: absent
- name: Install the latest version of Apache from the testing repo
ansible.builtin.dnf:
name: httpd
enablerepo: testing
state: present
- name: Upgrade all packages
ansible.builtin.dnf:
name: "*"
state: latest
- name: Update the webserver, depending on which is installed on the system. Do not install the other one
ansible.builtin.dnf:
name:
- httpd
- nginx
state: latest
update_only: yes
- name: Install the nginx rpm from a remote repo
ansible.builtin.dnf:
name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
state: present
- name: Install nginx rpm from a local file
ansible.builtin.dnf:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: Install Package based upon the file it provides
ansible.builtin.dnf:
name: /usr/bin/cowsay
state: present
- name: Install the 'Development tools' package group
ansible.builtin.dnf:
name: '@Development tools'
state: present
- name: Autoremove unneeded packages installed as dependencies
ansible.builtin.dnf:
autoremove: yes
- name: Uninstall httpd but keep its dependencies
ansible.builtin.dnf:
name: httpd
state: absent
autoremove: no
- name: Install a modularity appstream with defined stream and profile
ansible.builtin.dnf:
name: '@postgresql:9.6/client'
state: present
- name: Install a modularity appstream with defined stream
ansible.builtin.dnf:
name: '@postgresql:9.6'
state: present
- name: Install a modularity appstream with defined profile
ansible.builtin.dnf:
name: '@postgresql/client'
state: present
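# The following tasks are illustrative sketches added by the editor, not part of
# the upstream documentation; they only combine options documented above, and the
# download directory path is hypothetical.
- name: Apply security-related updates only
  ansible.builtin.dnf:
    name: "*"
    state: latest
    security: yes
- name: Download the httpd package without installing it
  ansible.builtin.dnf:
    name: httpd
    state: latest
    download_only: yes
    download_dir: /tmp/dnf-downloads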
"""
import os
import sys
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.urls import fetch_file
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
# NOTE dnf Python bindings import is postponed, see DnfModule._ensure_dnf(),
# because we need AnsibleModule object to use get_best_parsable_locale()
# to set proper locale before importing dnf to be able to scrape
# the output in some cases (FIXME?).
dnf = None
class DnfModule(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def __init__(self, module):
# This populates instance vars for all argument spec params
super(DnfModule, self).__init__(module)
self._ensure_dnf()
self.pkg_mgr_name = "dnf"
self.with_modules = dnf.base.WITH_MODULES
def _sanitize_dnf_error_msg_install(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to filter in an install scenario. Do that here.
"""
if (
to_text("no package matched") in to_text(error) or
to_text("No match for argument:") in to_text(error)
):
return "No package {0} available.".format(spec)
return error
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
# envra format for backwards compat
result['envra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(**result)
# keep nevra key for backwards compat as it was previously
# defined with a value in envra format
result['nevra'] = result['envra']
if package.installtime == 0:
result['yumstate'] = 'available'
else:
result['yumstate'] = 'installed'
return result
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale
os.environ['LANGUAGE'] = os.environ['LANG'] = locale
global dnf
try:
import dnf
import dnf.const
import dnf.exceptions
import dnf.package
import dnf.subject
import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
if HAS_DNF:
return
system_interpreters = ['/usr/libexec/platform-python',
'/usr/bin/python3',
'/usr/bin/python']
if not has_respawned():
# probe well-known system Python locations for accessible bindings, favoring py3
interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
if interpreter:
# respawn under the interpreter where the bindings should be found
respawn_module(interpreter)
# end of the line for this module, the process will exit here once the respawned module completes
# done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
self.module.fail_json(
msg="Could not import the dnf python module using {0} ({1}). "
"Please install `python3-dnf` package or ensure you have specified the "
"correct ansible_python_interpreter. (attempted {2})"
.format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
results=[]
)
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/', sslverify=True):
"""Configure the dnf Base object."""
conf = base.conf
# Change the configuration file path if provided, this must be done before conf.read() is called
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file,
results=[],
)
else:
conf.config_file_path = conf_file
# Read the configuration file
conf.read()
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
conf.localpkg_gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set certificate validation
conf.sslverify = sslverify
# Set installroot
conf.installroot = installroot
# Load substitutions from the filesystem
conf.substitutions.update_from_etc(installroot)
        # Handle the immutable/mutable datatype differences between DNF versions
        # (dnf v1/v2/v3):
        #
        # In DNF < 3.0 these settings are lists, and modifying them works
        # In DNF >= 3.0, < 3.6 they are lists, but modifying them doesn't work
        # In DNF >= 3.6 they have been turned into tuples, to communicate that modifying them doesn't work
#
# https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
#
# Set excludes
if self.exclude:
_excludes = list(conf.exclude)
_excludes.extend(self.exclude)
conf.exclude = _excludes
# Set disable_excludes
if self.disable_excludes:
_disable_excludes = list(conf.disable_excludes)
if self.disable_excludes not in _disable_excludes:
_disable_excludes.append(self.disable_excludes)
conf.disable_excludes = _disable_excludes
# Set releasever
if self.releasever is not None:
conf.substitutions['releasever'] = self.releasever
if conf.substitutions.get('releasever') is None:
self.module.warn(
'Unable to detect release version (use "releasever" option to specify release version)'
)
# values of conf.substitutions are expected to be strings
# setting this to an empty string instead of None appears to mimic the DNF CLI behavior
conf.substitutions['releasever'] = ''
# Honor installroot for dnf directories
# This will also perform variable substitutions in the paths
for opt in ('cachedir', 'logdir', 'persistdir'):
conf.prepend_installroot(opt)
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
# best and nobest are mutually exclusive
if self.nobest is not None:
conf.best = not self.nobest
elif self.best is not None:
conf.best = self.best
if self.download_only:
conf.downloadonly = True
if self.download_dir:
conf.destdir = self.download_dir
if self.cacheonly:
conf.cacheonly = True
# Default in dnf upstream is true
conf.clean_requirements_on_remove = self.autoremove
# Default in dnf (and module default) is True
conf.install_weak_deps = self.install_weak_deps
def _specify_repositories(self, base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.enable()
for repo in base.repos.iter_enabled():
if self.disable_gpg_check:
repo.gpgcheck = False
repo.repo_gpgcheck = False
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, sslverify):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
base.setup_loggers()
base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
base.pre_configure_plugins()
self._specify_repositories(base, disablerepo, enablerepo)
base.configure_plugins()
try:
if self.update_cache:
try:
base.update_cache()
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
base.fill_sack(load_system_repo='auto')
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
add_security_filters = getattr(base, "add_security_filters", None)
if callable(add_security_filters):
filters = {}
if self.bugfix:
filters.setdefault('types', []).append('bugfix')
if self.security:
filters.setdefault('types', []).append('security')
if filters:
add_security_filters('eq', **filters)
else:
filters = []
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
filters.append(base.sack.query().upgrades().filter(**key))
if self.security:
key = {'advisory_type__eq': 'security'}
filters.append(base.sack.query().upgrades().filter(**key))
if filters:
base._update_security_filters = filters
return base
def list_items(self, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
self._package_dict(package)
for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in self.base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [self._package_dict(package) for package in packages]
self.module.exit_json(msg="", results=results)
def _is_installed(self, pkg):
installed_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).installed()
if dnf.util.is_glob_pattern(pkg):
available_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).available()
return not (
{p.name for p in available_query} - {p.name for p in installed_query}
)
else:
return bool(installed_query)
def _is_newer_version_installed(self, pkg_spec):
try:
if isinstance(pkg_spec, dnf.package.Package):
installed = sorted(self.base.sack.query().installed().filter(name=pkg_spec.name, arch=pkg_spec.arch))[-1]
return installed.evr_gt(pkg_spec)
else:
available = dnf.subject.Subject(pkg_spec).get_best_query(sack=self.base.sack).available()
installed = self.base.sack.query().installed().filter(name=available[0].name)
for arch in sorted(set(p.arch for p in installed)): # select only from already-installed arches for this case
installed_pkg = sorted(installed.filter(arch=arch))[-1]
try:
available_pkg = sorted(available.filter(arch=arch))[-1]
except IndexError:
continue # nothing currently available for this arch; keep going
if installed_pkg.evr_gt(available_pkg):
return True
return False
except IndexError:
return False
def _mark_package_install(self, pkg_spec, upgrade=False):
"""Mark the package for install."""
is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
is_installed = self._is_installed(pkg_spec)
msg = ''
try:
if is_newer_version_installed:
if self.allow_downgrade:
# dnf only does allow_downgrade, we have to handle this ourselves
# because it allows a possibility for non-idempotent transactions
# on a system's package set (pending the yum repo has many old
# NVRs indexed)
if upgrade:
if is_installed: # Case 1
# TODO: Is this case reachable?
#
# _is_installed() demands a name (*not* NVR) or else is always False
# (wildcards are treated literally).
#
# Meanwhile, _is_newer_version_installed() demands something versioned
# or else is always false.
#
# I fail to see how they can both be true at the same time for any
# given pkg_spec. -re
self.base.upgrade(pkg_spec)
else: # Case 2
self.base.install(pkg_spec, strict=self.base.conf.strict)
else: # Case 3
self.base.install(pkg_spec, strict=self.base.conf.strict)
else: # Case 4, Nothing to do, report back
pass
elif is_installed: # A potentially older (or same) version is installed
if upgrade: # Case 5
self.base.upgrade(pkg_spec)
else: # Case 6, Nothing to do, report back
pass
else: # Case 7, The package is not installed, simply install it
self.base.install(pkg_spec, strict=self.base.conf.strict)
except dnf.exceptions.MarkingError as e:
msg = "No package {0} available.".format(pkg_spec)
if self.base.conf.strict:
return {
'failed': True,
'msg': msg,
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.DepsolveError as e:
return {
'failed': True,
'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.Error as e:
return {
'failed': True,
'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
return {'failed': False, 'msg': msg, 'failure': '', 'rc': 0}
def _parse_spec_group_file(self):
pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
already_loaded_comps = False # Only load this if necessary, it's slow
for name in self.names:
if '://' in name:
name = fetch_file(self.module, name)
filenames.append(name)
elif name.endswith(".rpm"):
filenames.append(name)
elif name.startswith('/'):
# dnf install /usr/bin/vi
installed = self.base.sack.query().filter(provides=name, file=name).installed().run()
if installed:
pkg_specs.append(installed[0].name) # should be only one?
elif not self.update_only:
# not installed, pass the filename for dnf to process
pkg_specs.append(name)
elif name.startswith("@") or ('/' in name):
if not already_loaded_comps:
self.base.read_comps()
already_loaded_comps = True
grp_env_mdl_candidate = name[1:].strip()
if self.with_modules:
mdl = self.module_base._get_modules(grp_env_mdl_candidate)
if mdl[0]:
module_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
pkg_specs.append(name)
return pkg_specs, grp_specs, module_specs, filenames
def _update_only(self, pkgs):
not_installed = []
for pkg in pkgs:
if self._is_installed(
self._package_dict(pkg)["nevra"] if isinstance(pkg, dnf.package.Package) else pkg
):
try:
if isinstance(pkg, dnf.package.Package):
self.base.package_upgrade(pkg)
else:
self.base.upgrade(pkg)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
else:
not_installed.append(pkg)
return not_installed
def _install_remote_rpms(self, filenames):
try:
pkgs = self.base.add_remote_rpms(filenames)
if self.update_only:
self._update_only(pkgs)
else:
for pkg in pkgs:
if not (self._is_newer_version_installed(pkg) and not self.allow_downgrade):
self.base.package_install(pkg, strict=self.base.conf.strict)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
def _is_module_installed(self, module_spec):
if self.with_modules:
module_spec = module_spec.strip()
module_list, nsv = self.module_base._get_modules(module_spec)
enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
if enabled_streams:
if nsv.stream:
if nsv.stream in enabled_streams:
return True # The provided stream was found
else:
return False # The provided stream was not found
else:
return True # No stream provided, but module found
return False # seems like a logical default
def ensure(self):
response = {
'msg': "",
'changed': False,
'results': [],
'rc': 0
}
# Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't.
failure_response = {
'msg': "",
'failures': [],
'results': [],
'rc': 1
}
# Autoremove is called alone
# Jump to remove path where base.autoremove() is run
if not self.names and self.autoremove:
self.names = []
self.state = 'absent'
if self.names == ['*'] and self.state == 'latest':
try:
self.base.upgrade_all()
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
self.module.fail_json(**failure_response)
else:
pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames]
groups = []
environments = []
for group_spec in (g.strip() for g in group_specs):
group = self.base.comps.group_by_pattern(group_spec)
if group:
groups.append(group.id)
else:
environment = self.base.comps.environment_by_pattern(group_spec)
if environment:
environments.append(environment.id)
else:
self.module.fail_json(
msg="No group {0} available.".format(group_spec),
results=[],
)
if self.state in ['installed', 'present']:
# Install files.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Install modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if not self._is_module_installed(module):
response['results'].append("Module {0} installed.".format(module))
self.module_base.install([module])
self.module_base.enable([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
# Install groups.
for group in groups:
try:
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if module_specs and not self.with_modules:
# This means that the group or env wasn't found in comps
self.module.fail_json(
msg="No group {0} available.".format(module_specs[0]),
results=[],
)
# Install packages.
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
elif self.state == 'latest':
# "latest" is same as "installed" for filenames.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Upgrade modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} upgraded.".format(module))
self.module_base.upgrade([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
try:
self.base.group_upgrade(group)
response['results'].append("Group {0} upgraded.".format(group))
except dnf.exceptions.CompsError:
if not self.update_only:
# If not already installed, try to install.
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
try:
self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec, upgrade=True)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
else:
# state == absent
if filenames:
self.module.fail_json(
msg="Cannot remove paths -- please specify package name.",
results=[],
)
# Remove modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} removed.".format(module))
self.module_base.remove([module])
self.module_base.disable([module])
self.module_base.reset([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
self.base.group_remove(group)
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
for environment in environments:
try:
self.base.environment_remove(environment)
except dnf.exceptions.CompsError:
# Environment is already uninstalled.
pass
for pkg_spec in pkg_specs:
try:
self.base.remove(pkg_spec)
except dnf.exceptions.MarkingError as e:
response['results'].append(f"{e.value}: {pkg_spec}")
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
self.allowerasing = True
if self.autoremove:
self.base.autoremove()
try:
# NOTE for people who go down the rabbit hole of figuring out why
# resolve() throws DepsolveError here on dep conflict, but not when
# called from the CLI: It's controlled by conf.best. When best is
# set, Hawkey will fail the goal, and resolve() in dnf.base.Base
# will throw. Otherwise if it's not set, the update (install) will
# be (almost silently) removed from the goal, and Hawkey will report
# success. Note that in this case, similar to the CLI, skip_broken
# does nothing to help here, so we don't take it into account at
# all.
if not self.base.resolve(allow_erasing=self.allowerasing):
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
response['msg'] = "Nothing to do"
self.module.exit_json(**response)
else:
response['changed'] = True
# If packages got installed/removed, add them to the results.
# We do this early so we can use it for both check_mode and not.
if self.download_only:
install_action = 'Downloaded'
else:
install_action = 'Installed'
for package in self.base.transaction.install_set:
response['results'].append("{0}: {1}".format(install_action, package))
for package in self.base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package))
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
if self.module.check_mode:
response['msg'] = "Check mode: No changes made, but would have if not in check mode"
self.module.exit_json(**response)
try:
if self.download_only and self.download_dir and self.base.conf.destdir:
dnf.util.ensure_dir(self.base.conf.destdir)
self.base.repos.all().pkgdir = self.base.conf.destdir
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
failure_response['msg'] = "Failed to download packages: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
# Validate GPG. This is NOT done in dnf.Base (it's done in the
# upstream CLI subclass of dnf.Base)
if not self.disable_gpg_check:
for package in self.base.transaction.install_set:
fail = False
gpgres, gpgerr = self.base._sig_check_pkg(package)
if gpgres == 0: # validated successfully
continue
elif gpgres == 1: # validation failed, install cert?
try:
self.base._get_key_for_package(package)
except dnf.exceptions.Error as e:
fail = True
else: # fatal error
fail = True
if fail:
msg = 'Failed to validate GPG signature for {0}: {1}'.format(package, gpgerr)
self.module.fail_json(msg)
if self.download_only:
# No further work left to do, and the results were already updated above.
# Just return them.
self.module.exit_json(**response)
else:
tid = self.base.do_transaction()
if tid is not None:
transaction = self.base.history.old([tid])[0]
if transaction.return_code:
failure_response['failures'].append(transaction.output())
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
self.module.exit_json(**response)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
def run(self):
if self.update_cache and not self.names and not self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.module.exit_json(
msg="Cache updated",
changed=False,
results=[],
rc=0
)
# Set state as installed by default
# This is not set in AnsibleModule() because the following shouldn't happen
# - dnf: autoremove=yes state=installed
if self.state is None:
self.state = 'installed'
if self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.list_items(self.list)
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not self.download_only and not dnf.util.am_i_root():
self.module.fail_json(
msg="This command has to be run under the root user.",
results=[],
)
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
if self.with_modules:
self.module_base = dnf.module.module_base.ModuleBase(self.base)
try:
self.ensure()
finally:
self.base.close()
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'dnf', 'yum', 'yum4', 'dnf4', 'dnf5'])
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = DnfModule(module)
try:
module_implementation.run()
except dnf.exceptions.RepoError as de:
module.fail_json(
msg="Failed to synchronize repodata: {0}".format(to_native(de)),
rc=1,
results=[],
changed=False
)
if __name__ == '__main__':
main()
| 52,288 | Python | .py | 1,144 | 33.103147 | 149 | 0.577698 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,154 | sysvinit.py | ansible_ansible/lib/ansible/modules/sysvinit.py |
# -*- coding: utf-8 -*-
# (c) 2017, Brian Coca <bcoca@ansible.com>
# (c) 2017, Adam Miller <admiller@redhat.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: sysvinit
author:
- "Ansible Core Team"
version_added: "2.6"
short_description: Manage SysV services
description:
- Controls services on target hosts that use the SysV init system.
options:
name:
required: true
description:
- Name of the service.
type: str
aliases: ['service']
state:
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
        Not all init scripts support V(restarted) or V(reloaded) natively, so these will both trigger a stop and start as needed.
type: str
enabled:
type: bool
description:
      - Whether the service should start on boot. At least one of O(state) and O(enabled) is required.
sleep:
default: 1
description:
- If the service is being V(restarted) or V(reloaded) then sleep this many seconds between the stop and start command.
This helps to workaround badly behaving services.
type: int
pattern:
description:
- A substring to look for as would be found in the output of the I(ps) command as a stand-in for a status result.
- If the string is found, the service will be assumed to be running.
- "This option is mainly for use with init scripts that don't support the C(status) option."
type: str
runlevels:
description:
- The runlevels this script should be enabled/disabled from.
- Use this to override the defaults set by the package or init script itself.
type: list
elements: str
arguments:
description:
- Additional arguments provided on the command line that some init scripts accept.
type: str
aliases: [ 'args' ]
daemonize:
type: bool
description:
- Have the module daemonize as the service itself might not do so properly.
      - This is useful with badly written init scripts or daemons; the problem
        commonly manifests as the task hanging because it is still holding the
        tty, or the service dying when the task is over as the connection
        closes the session.
default: no
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
notes:
- One option other than name is required.
- The service names might vary by specific OS/distribution.
requirements:
- That the service managed has a corresponding init script.
"""
EXAMPLES = """
- name: Make sure apache2 is started
ansible.builtin.sysvinit:
name: apache2
state: started
enabled: yes
- name: Sleep for 5 seconds between stop and start command of badly behaving service
ansible.builtin.sysvinit:
name: apache2
state: restarted
sleep: 5
- name: Make sure apache2 is started on runlevels 3 and 5
ansible.builtin.sysvinit:
name: apache2
state: started
enabled: yes
runlevels:
- 3
- 5
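# Illustrative sketch added by the editor, not part of the upstream documentation;
# the service name 'legacy-daemon' is hypothetical.
- name: Check a service via a ps pattern when its init script lacks a status action
  ansible.builtin.sysvinit:
    name: legacy-daemon
    state: started
    pattern: legacy-daemon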
"""
RETURN = r"""
results:
description: results from actions taken
returned: always
type: complex
contains:
name:
description: Name of the service
type: str
returned: always
sample: "apache2"
status:
description: Status of the service
type: dict
returned: changed
sample: {
"enabled": {
"changed": true,
"rc": 0,
"stderr": "",
"stdout": ""
},
"stopped": {
"changed": true,
"rc": 0,
"stderr": "",
"stdout": "Stopping web server: apache2.\n"
}
}
"""
import re
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import sysv_is_enabled, get_sysv_script, sysv_exists, fail_if_missing, get_ps, daemonize
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['service']),
state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled=dict(type='bool'),
sleep=dict(type='int', default=1),
pattern=dict(type='str'),
arguments=dict(type='str', aliases=['args']),
runlevels=dict(type='list', elements='str'),
daemonize=dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled']],
)
name = module.params['name']
action = module.params['state']
enabled = module.params['enabled']
runlevels = module.params['runlevels']
pattern = module.params['pattern']
sleep_for = module.params['sleep']
rc = 0
out = err = ''
result = {
'name': name,
'changed': False,
'status': {}
}
# ensure service exists, get script name
fail_if_missing(module, sysv_exists(name), name)
script = get_sysv_script(name)
# locate binaries for service management
paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
binaries = ['chkconfig', 'update-rc.d', 'insserv', 'service']
# Keeps track of the service status for various runlevels because we can
# operate on multiple runlevels at once
runlevel_status = {}
location = {}
for binary in binaries:
location[binary] = module.get_bin_path(binary, opt_dirs=paths)
# figure out enable status
if runlevels:
for rl in runlevels:
runlevel_status.setdefault(rl, {})
runlevel_status[rl]["enabled"] = sysv_is_enabled(name, runlevel=rl)
else:
runlevel_status["enabled"] = sysv_is_enabled(name)
    # figure out started status, everyone does it differently!
is_started = False
worked = False
# user knows other methods fail and supplied pattern
if pattern:
worked = is_started = get_ps(module, pattern)
else:
if location.get('service'):
# standard tool that has been 'destandardized' by reimplementation in other OS/distros
cmd = '%s %s status' % (location['service'], name)
elif script:
# maybe script implements status (not LSB)
cmd = '%s status' % script
else:
module.fail_json(msg="Unable to determine service status")
(rc, out, err) = module.run_command(cmd)
if not rc == -1:
# special case
if name == 'iptables' and "ACCEPT" in out:
worked = True
is_started = True
# check output messages, messy but sadly more reliable than rc
if not worked and out.count('\n') <= 1:
cleanout = out.lower().replace(name.lower(), '')
for stopped in ['stop', 'is dead ', 'dead but ', 'could not access pid file', 'inactive']:
if stopped in cleanout:
worked = True
break
if not worked:
for started_status in ['run', 'start', 'active']:
if started_status in cleanout and "not " not in cleanout:
is_started = True
worked = True
break
            # hope rc is not lying to us; check commonly used 'bad' return codes
if not worked and rc in [1, 2, 3, 4, 69]:
worked = True
if not worked:
# hail mary
if rc == 0:
is_started = True
worked = True
# ps for luck, can only assure positive match
elif get_ps(module, name):
is_started = True
worked = True
module.warn("Used ps output to match service name and determine it is up, this is very unreliable")
if not worked:
module.warn("Unable to determine if service is up, assuming it is down")
###########################################################################
# BEGIN: Enable/Disable
result['status'].setdefault('enabled', {})
result['status']['enabled']['changed'] = False
result['status']['enabled']['rc'] = None
result['status']['enabled']['stdout'] = None
result['status']['enabled']['stderr'] = None
if runlevels:
result['status']['enabled']['runlevels'] = runlevels
for rl in runlevels:
if enabled != runlevel_status[rl]["enabled"]:
result['changed'] = True
result['status']['enabled']['changed'] = True
if not module.check_mode and result['changed']:
# Perform enable/disable here
if enabled:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s enable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s --level %s %s on" % (location['chkconfig'], ''.join(runlevels), name))
else:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s disable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s --level %s %s off" % (location['chkconfig'], ''.join(runlevels), name))
else:
if enabled is not None and enabled != runlevel_status["enabled"]:
result['changed'] = True
result['status']['enabled']['changed'] = True
if not module.check_mode and result['changed']:
# Perform enable/disable here
if enabled:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s defaults" % (location['update-rc.d'], name))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s %s on" % (location['chkconfig'], name))
else:
if location.get('update-rc.d'):
(rc, out, err) = module.run_command("%s %s disable" % (location['update-rc.d'], name))
elif location.get('chkconfig'):
(rc, out, err) = module.run_command("%s %s off" % (location['chkconfig'], name))
    # Assigned above, might be useful if something goes sideways
if not module.check_mode and result['status']['enabled']['changed']:
result['status']['enabled']['rc'] = rc
result['status']['enabled']['stdout'] = out
result['status']['enabled']['stderr'] = err
rc, out, err = None, None, None
if "illegal runlevel specified" in result['status']['enabled']['stderr']:
module.fail_json(msg="Illegal runlevel specified for enable operation on service %s" % name, **result)
# END: Enable/Disable
###########################################################################
###########################################################################
# BEGIN: state
result['status'].setdefault(module.params['state'], {})
result['status'][module.params['state']]['changed'] = False
result['status'][module.params['state']]['rc'] = None
result['status'][module.params['state']]['stdout'] = None
result['status'][module.params['state']]['stderr'] = None
if action:
action = re.sub(r'p?ed$', '', action.lower())
def runme(doit):
args = module.params['arguments']
cmd = "%s %s %s" % (script, doit, "" if args is None else args)
# how to run
if module.params['daemonize']:
(rc, out, err) = daemonize(module, cmd)
else:
(rc, out, err) = module.run_command(cmd)
# FIXME: ERRORS
if rc != 0:
module.fail_json(msg="Failed to %s service: %s" % (action, name), rc=rc, stdout=out, stderr=err)
return (rc, out, err)
if action == 'restart':
result['changed'] = True
result['status'][module.params['state']]['changed'] = True
if not module.check_mode:
# cannot rely on existing 'restart' in init script
for dothis in ['stop', 'start']:
(rc, out, err) = runme(dothis)
if sleep_for:
sleep(sleep_for)
elif is_started != (action == 'start'):
result['changed'] = True
result['status'][module.params['state']]['changed'] = True
if not module.check_mode:
rc, out, err = runme(action)
elif is_started == (action == 'stop'):
result['changed'] = True
result['status'][module.params['state']]['changed'] = True
if not module.check_mode:
rc, out, err = runme(action)
if not module.check_mode and result['status'][module.params['state']]['changed']:
result['status'][module.params['state']]['rc'] = rc
result['status'][module.params['state']]['stdout'] = out
result['status'][module.params['state']]['stderr'] = err
rc, out, err = None, None, None
# END: state
###########################################################################
module.exit_json(**result)
if __name__ == '__main__':
main()
| 13,956 | Python | .py | 328 | 32.719512 | 136 | 0.563982 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,155 | ping.py | ansible_ansible/lib/ansible/modules/ping.py |
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return V(pong) on success
description:
- A trivial test module, this module always returns V(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable Python is configured.
- This is NOT ICMP ping, this is just a trivial test module that requires Python on the remote-node.
- For Windows targets, use the M(ansible.windows.win_ping) module instead.
- For Network targets, use the M(ansible.netcommon.net_ping) module instead.
options:
data:
description:
- Data to return for the RV(ping) return value.
- If this parameter is set to V(crash), the module will cause an exception.
type: str
default: pong
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
seealso:
- module: ansible.netcommon.net_ping
- module: ansible.windows.win_ping
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = """
# Test we can log on to 'webservers' and execute python with json lib.
# ansible webservers -m ansible.builtin.ping
- name: Example from an Ansible Playbook
ansible.builtin.ping:
- name: Induce an exception to see what happens
ansible.builtin.ping:
data: crash
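# Illustrative sketch added by the editor, not part of the upstream documentation:
# the module echoes O(data) back in the RV(ping) return value.
- name: Return custom data instead of the default 'pong'
  ansible.builtin.ping:
    data: hello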
"""
RETURN = """
ping:
description: Value provided with the O(data) parameter.
returned: success
type: str
sample: pong
"""
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(type='str', default='pong'),
),
supports_check_mode=True
)
if module.params['data'] == 'crash':
raise Exception("boom")
result = dict(
ping=module.params['data'],
)
module.exit_json(**result)
if __name__ == '__main__':
main()
| 2,325 | Python | .py | 72 | 28.375 | 102 | 0.706303 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,156 | fail.py | ansible_ansible/lib/ansible/modules/fail.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: fail
short_description: Fail with custom message
description:
  - This module fails the play for the targeted host with a custom message.
- It can be useful for bailing out when a certain condition is met using C(when).
- This module is also supported for Windows targets.
version_added: "0.8"
options:
msg:
description:
- The customized message used for failing execution.
- If omitted, fail will simply bail out with a generic message.
type: str
default: Failed as requested from task
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
become:
support: none
bypass_host_loop:
support: none
connection:
support: none
check_mode:
support: full
diff_mode:
support: none
delegation:
details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
support: partial
platform:
platforms: all
seealso:
- module: ansible.builtin.assert
- module: ansible.builtin.debug
- module: ansible.builtin.meta
author:
- Dag Wieers (@dagwieers)
"""
EXAMPLES = r"""
- name: Example using fail and when together
ansible.builtin.fail:
msg: The system may not be provisioned according to the CMDB status.
when: cmdb_status != "to-be-staged"
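# Illustrative sketch added by the editor, not part of the upstream documentation;
# 'required_var' is a hypothetical variable. With msg omitted, the module fails
# with the default message "Failed as requested from task".
- name: Fail with the default message when a required variable is missing
  ansible.builtin.fail:
  when: required_var is not defined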
"""
| 1,659 | Python | .py | 57 | 25.350877 | 107 | 0.715091 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,157 | dnf5.py | ansible_ansible/lib/ansible/modules/dnf5.py |
# -*- coding: utf-8 -*-
# Copyright 2023 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: dnf5
author: Ansible Core Team
description:
  - Installs, upgrades, removes, and lists packages and groups with the I(dnf5) package manager.
- "WARNING: The I(dnf5) package manager is still under development and not all features that the existing M(ansible.builtin.dnf) module
provides are implemented in M(ansible.builtin.dnf5), please consult specific options for more information."
short_description: Manages packages with the I(dnf5) package manager
options:
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
When using O(state=latest), this can be C(*) which means run: C(dnf -y update).
You can also pass a url or a local path to an rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0).
Spaces around the operator are required.
- You can also pass an absolute path for a binary which is provided by the package to install.
See examples for more information.
aliases:
- pkg
type: list
elements: str
default: []
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks.
Use M(ansible.builtin.package_facts) instead of the O(list) argument as a best practice.
type: str
state:
description:
- Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
      - Default is V(None); in effect the default action is V(present), unless O(autoremove=true)
        is set, in which case V(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
type: str
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if O(state) is V(present) or V(latest).
- This setting affects packages installed from a repository as well as
"local" packages installed from the filesystem or a URL.
type: bool
default: 'no'
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
type: str
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
type: str
autoremove:
description:
- If V(true), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when O(state=absent).
type: bool
default: "no"
exclude:
description:
- Package name(s) to exclude when O(state=present) or O(state=latest). This can be a
list or a comma separated string.
type: list
elements: str
default: []
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
without raising an error. Equivalent to passing the C(--skip-broken) option.
type: bool
default: "no"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
Has an effect only if O(state=present) or O(state=latest).
type: bool
default: "no"
aliases: [ expire-cache ]
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if O(state=present) or O(state=latest).
default: "no"
type: bool
security:
description:
- If set to V(true), and O(state=latest) then only installs updates that have been marked security related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
type: bool
default: "no"
bugfix:
description:
- If set to V(true), and O(state=latest) then only installs updates that have been marked bugfix related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
default: "no"
type: bool
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
- O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin).
- Requires python3-libdnf5 5.2.0.0+.
type: list
elements: str
default: []
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
- O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin).
- Requires python3-libdnf5 5.2.0.0+.
type: list
default: []
elements: str
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to V(all), disables all excludes.
- If set to V(main), disable excludes defined in C([main]) in C(dnf.conf).
- If set to V(repoid), disable excludes defined for given repo id.
type: str
validate_certs:
description:
- This is effectively a no-op in the dnf5 module as dnf5 itself handles downloading a https url as the source of the rpm,
but is an accepted parameter for feature parity/compatibility with the M(ansible.builtin.dnf) module.
type: bool
default: "yes"
sslverify:
description:
      - Whether to validate the SSL certificate of the repository server for this transaction.
      - Set this to V(false) if one of the configured repositories is using an untrusted or self-signed certificate.
type: bool
default: "yes"
allow_downgrade:
description:
      - Specify whether the named package and version is allowed to downgrade
        an already installed higher version of that package.
Note that setting O(allow_downgrade=true) can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
install_repoquery:
description:
- This is effectively a no-op in DNF as it is not needed with DNF.
- This option is deprecated and will be removed in ansible-core 2.20.
type: bool
default: "yes"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
lock_timeout:
description:
- This is currently a no-op as dnf5 does not provide an option to configure it.
- Amount of time to wait for the dnf lockfile to be freed.
required: false
default: 30
type: int
install_weak_deps:
description:
- Will also install all packages linked by a weak dependency relation.
type: bool
default: "yes"
download_dir:
description:
- Specifies an alternate directory to store packages.
- Has an effect only if O(download_only) is specified.
type: str
allowerasing:
description:
- If V(true) it allows erasing of installed packages to resolve dependencies.
required: false
type: bool
default: "no"
nobest:
description:
      - This is the opposite of the O(best) option, kept for backwards compatibility.
- Since ansible-core 2.17 the default value is set by the operating system distribution.
required: false
type: bool
best:
description:
- When set to V(true), either use a package with the highest version available or fail.
- When set to V(false), if the latest version cannot be installed go with the lower version.
- Default is set by the operating system distribution.
required: false
type: bool
version_added: "2.17"
cacheonly:
description:
- Tells dnf to run entirely from system cache; does not download or update metadata.
type: bool
default: "no"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
details: dnf5 has 2 action plugins that use it under the hood, M(ansible.builtin.dnf) and M(ansible.builtin.package).
support: partial
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: rhel
requirements:
- "python3-libdnf5"
version_added: 2.15
"""
EXAMPLES = """
- name: Install the latest version of Apache
ansible.builtin.dnf5:
name: httpd
state: latest
- name: Install Apache >= 2.4
ansible.builtin.dnf5:
name: httpd >= 2.4
state: present
- name: Install the latest version of Apache and MariaDB
ansible.builtin.dnf5:
name:
- httpd
- mariadb-server
state: latest
- name: Remove the Apache package
ansible.builtin.dnf5:
name: httpd
state: absent
- name: Install the latest version of Apache from the testing repo
ansible.builtin.dnf5:
name: httpd
enablerepo: testing
state: present
- name: Upgrade all packages
ansible.builtin.dnf5:
name: "*"
state: latest
- name: Update the webserver, depending on which is installed on the system. Do not install the other one
ansible.builtin.dnf5:
name:
- httpd
- nginx
state: latest
update_only: yes
- name: Install the nginx rpm from a remote repo
ansible.builtin.dnf5:
name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
state: present
- name: Install nginx rpm from a local file
ansible.builtin.dnf5:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: Install Package based upon the file it provides
ansible.builtin.dnf5:
name: /usr/bin/cowsay
state: present
- name: Install the 'Development tools' package group
ansible.builtin.dnf5:
name: '@Development tools'
state: present
- name: Autoremove unneeded packages installed as dependencies
ansible.builtin.dnf5:
autoremove: yes
- name: Uninstall httpd but keep its dependencies
ansible.builtin.dnf5:
name: httpd
state: absent
autoremove: no
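# The following are sketches combining options documented above; package names,
# repo ids and paths are illustrative.
- name: Install only updates that have been marked security related
  ansible.builtin.dnf5:
    name: "*"
    state: latest
    security: true
- name: Download the httpd package to a directory without installing it
  ansible.builtin.dnf5:
    name: httpd
    download_only: true
    download_dir: /tmp/dnf5-downloads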
"""
RETURN = """
msg:
description: Additional information about the result
returned: always
type: str
sample: "Nothing to do"
results:
description: A list of the dnf transaction results
returned: success
type: list
sample: ["Installed: lsof-4.94.0-4.fc37.x86_64"]
failures:
description: A list of the dnf transaction failures
returned: failure
type: list
sample: ["Argument 'lsof' matches only excluded packages."]
rc:
description: For compatibility, 0 for success, 1 for failure
returned: always
type: int
sample: 0
"""
import os
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
libdnf5 = None
def is_installed(base, spec):
settings = libdnf5.base.ResolveSpecSettings()
installed_query = libdnf5.rpm.PackageQuery(base)
installed_query.filter_installed()
match, nevra = installed_query.resolve_pkg_spec(spec, settings, True)
# FIXME use `is_glob_pattern` function when available:
# https://github.com/rpm-software-management/dnf5/issues/1563
glob_patterns = set("*[?")
if any(set(char) & glob_patterns for char in spec):
available_query = libdnf5.rpm.PackageQuery(base)
available_query.filter_available()
available_query.resolve_pkg_spec(spec, settings, True)
return not (
{p.get_name() for p in available_query} - {p.get_name() for p in installed_query}
)
else:
return match
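# Illustrative behaviour of the glob branch above (package names are hypothetical):
# for spec "php*" with matching available names {php, php-cli} but only php
# installed, the name-set difference is non-empty, so the spec is reported as
# not (fully) installed.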
def is_newer_version_installed(base, spec):
# FIXME investigate whether this function can be replaced by dnf5's allow_downgrade option
if "/" in spec:
spec = spec.split("/")[-1]
if spec.endswith(".rpm"):
spec = spec[:-4]
try:
spec_nevra = next(iter(libdnf5.rpm.Nevra.parse(spec)))
except (RuntimeError, StopIteration):
return False
spec_version = spec_nevra.get_version()
if not spec_version:
return False
installed = libdnf5.rpm.PackageQuery(base)
installed.filter_installed()
installed.filter_name([spec_nevra.get_name()])
installed.filter_latest_evr()
try:
installed_package = list(installed)[-1]
except IndexError:
return False
target = libdnf5.rpm.PackageQuery(base)
target.filter_name([spec_nevra.get_name()])
target.filter_version([spec_version])
spec_release = spec_nevra.get_release()
if spec_release:
target.filter_release([spec_release])
spec_epoch = spec_nevra.get_epoch()
if spec_epoch:
target.filter_epoch([spec_epoch])
target.filter_latest_evr()
try:
target_package = list(target)[-1]
except IndexError:
return False
# FIXME https://github.com/rpm-software-management/dnf5/issues/1104
return libdnf5.rpm.rpmvercmp(installed_package.get_evr(), target_package.get_evr()) == 1
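# Illustrative trace (hypothetical NEVRAs, assuming foo-1.0 exists in the enabled
# repos): with foo-2.0-1.fc40 installed, spec "foo-1.0" parses to version 1.0,
# the target query resolves to foo-1.0-1.fc40, and
# rpmvercmp("2.0-1.fc40", "1.0-1.fc40") == 1, so a newer version is installed.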
def package_to_dict(package):
return {
"nevra": package.get_nevra(),
"envra": package.get_nevra(), # dnf module compat
"name": package.get_name(),
"arch": package.get_arch(),
"epoch": str(package.get_epoch()),
"release": package.get_release(),
"version": package.get_version(),
"repo": package.get_repo_id(),
"yumstate": "installed" if package.is_installed() else "available",
}
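# Shape of the dict produced above, using the NEVRA from the RETURN sample
# (the repo id is illustrative):
# {"nevra": "lsof-4.94.0-4.fc37.x86_64", "envra": "lsof-4.94.0-4.fc37.x86_64",
#  "name": "lsof", "arch": "x86_64", "epoch": "0", "release": "4.fc37",
#  "version": "4.94.0", "repo": "fedora", "yumstate": "installed"}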
def get_unneeded_pkgs(base):
query = libdnf5.rpm.PackageQuery(base)
query.filter_installed()
query.filter_unneeded()
yield from query
class Dnf5Module(YumDnf):
def __init__(self, module):
super(Dnf5Module, self).__init__(module)
self._ensure_dnf()
self.pkg_mgr_name = "dnf5"
def fail_on_non_existing_plugins(self, base):
# https://github.com/rpm-software-management/dnf5/issues/1460
try:
plugin_names = [p.get_name() for p in base.get_plugins_info()]
except AttributeError:
# plugins functionality requires python3-libdnf5 5.2.0.0+
# silently ignore here, the module will fail later when
# base.enable_disable_plugins is attempted to be used if
# user specifies enable_plugin/disable_plugin
return
msg = []
if enable_unmatched := set(self.enable_plugin).difference(plugin_names):
msg.append(
f"No matches were found for the following plugin name patterns while enabling libdnf5 plugins: {', '.join(enable_unmatched)}."
)
if disable_unmatched := set(self.disable_plugin).difference(plugin_names):
msg.append(
f"No matches were found for the following plugin name patterns while disabling libdnf5 plugins: {', '.join(disable_unmatched)}."
)
if msg:
self.module.fail_json(msg=" ".join(msg))
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ["LC_ALL"] = os.environ["LC_MESSAGES"] = locale
os.environ["LANGUAGE"] = os.environ["LANG"] = locale
global libdnf5
has_dnf = True
try:
import libdnf5 # type: ignore[import]
except ImportError:
has_dnf = False
if has_dnf:
return
system_interpreters = [
"/usr/libexec/platform-python",
"/usr/bin/python3",
"/usr/bin/python",
]
if not has_respawned():
# probe well-known system Python locations for accessible bindings, favoring py3
interpreter = probe_interpreters_for_module(system_interpreters, "libdnf5")
if interpreter:
# respawn under the interpreter where the bindings should be found
respawn_module(interpreter)
# end of the line for this module, the process will exit here once the respawned module completes
# done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
self.module.fail_json(
msg="Could not import the libdnf5 python module using {0} ({1}). "
"Please install python3-libdnf5 package or ensure you have specified the "
"correct ansible_python_interpreter. (attempted {2})".format(
sys.executable, sys.version.replace("\n", ""), system_interpreters
),
failures=[],
)
def run(self):
if not self.list and not self.download_only and os.geteuid() != 0:
self.module.fail_json(
msg="This command has to be run under the root user.",
failures=[],
rc=1,
)
base = libdnf5.base.Base()
conf = base.get_config()
if self.conf_file:
conf.config_file_path = self.conf_file
try:
base.load_config()
except RuntimeError as e:
self.module.fail_json(
msg=str(e),
conf_file=self.conf_file,
failures=[],
rc=1,
)
if self.releasever is not None:
variables = base.get_vars()
variables.set("releasever", self.releasever)
if self.exclude:
conf.excludepkgs = self.exclude
if self.disable_excludes:
if self.disable_excludes == "all":
self.disable_excludes = "*"
conf.disable_excludes = self.disable_excludes
conf.skip_broken = self.skip_broken
# best and nobest are mutually exclusive
if self.nobest is not None:
conf.best = not self.nobest
elif self.best is not None:
conf.best = self.best
conf.install_weak_deps = self.install_weak_deps
conf.gpgcheck = not self.disable_gpg_check
conf.localpkg_gpgcheck = not self.disable_gpg_check
conf.sslverify = self.sslverify
conf.clean_requirements_on_remove = self.autoremove
conf.installroot = self.installroot
conf.use_host_config = True # needed for installroot
conf.cacheonly = "all" if self.cacheonly else "none"
if self.download_dir:
conf.destdir = self.download_dir
if self.enable_plugin:
try:
base.enable_disable_plugins(self.enable_plugin, True)
except AttributeError:
self.module.fail_json(msg="'enable_plugin' requires python3-libdnf5 5.2.0.0+")
if self.disable_plugin:
try:
base.enable_disable_plugins(self.disable_plugin, False)
except AttributeError:
self.module.fail_json(msg="'disable_plugin' requires python3-libdnf5 5.2.0.0+")
base.setup()
# https://github.com/rpm-software-management/dnf5/issues/1460
self.fail_on_non_existing_plugins(base)
log_router = base.get_logger()
global_logger = libdnf5.logger.GlobalLogger()
global_logger.set(log_router.get(), libdnf5.logger.Logger.Level_DEBUG)
# FIXME hardcoding the filename does not seem right, should libdnf5 expose the default file name?
logger = libdnf5.logger.create_file_logger(base, "dnf5.log")
log_router.add_logger(logger)
if self.update_cache:
repo_query = libdnf5.repo.RepoQuery(base)
repo_query.filter_type(libdnf5.repo.Repo.Type_AVAILABLE)
for repo in repo_query:
repo_dir = repo.get_cachedir()
if os.path.exists(repo_dir):
repo_cache = libdnf5.repo.RepoCache(base, repo_dir)
repo_cache.write_attribute(libdnf5.repo.RepoCache.ATTRIBUTE_EXPIRED)
sack = base.get_repo_sack()
sack.create_repos_from_system_configuration()
repo_query = libdnf5.repo.RepoQuery(base)
if self.disablerepo:
repo_query.filter_id(self.disablerepo, libdnf5.common.QueryCmp_IGLOB)
for repo in repo_query:
repo.disable()
if self.enablerepo:
repo_query.filter_id(self.enablerepo, libdnf5.common.QueryCmp_IGLOB)
for repo in repo_query:
repo.enable()
try:
sack.load_repos()
except AttributeError:
# dnf5 < 5.2.0.0
sack.update_and_load_enabled_repos(True)
if self.update_cache and not self.names and not self.list:
self.module.exit_json(
msg="Cache updated",
changed=False,
results=[],
rc=0
)
if self.list:
command = self.list
if command == "updates":
command = "upgrades"
if command in {"installed", "upgrades", "available"}:
query = libdnf5.rpm.PackageQuery(base)
getattr(query, "filter_{}".format(command))()
results = [package_to_dict(package) for package in query]
elif command in {"repos", "repositories"}:
query = libdnf5.repo.RepoQuery(base)
query.filter_enabled(True)
results = [{"repoid": repo.get_id(), "state": "enabled"} for repo in query]
else:
resolve_spec_settings = libdnf5.base.ResolveSpecSettings()
query = libdnf5.rpm.PackageQuery(base)
query.resolve_pkg_spec(command, resolve_spec_settings, True)
results = [package_to_dict(package) for package in query]
self.module.exit_json(msg="", results=results, rc=0)
settings = libdnf5.base.GoalJobSettings()
try:
settings.set_group_with_name(True)
except AttributeError:
# dnf5 < 5.2.0.0
settings.group_with_name = True
if self.bugfix or self.security:
advisory_query = libdnf5.advisory.AdvisoryQuery(base)
types = []
if self.bugfix:
types.append("bugfix")
if self.security:
types.append("security")
advisory_query.filter_type(types)
settings.set_advisory_filter(advisory_query)
goal = libdnf5.base.Goal(base)
results = []
if self.names == ["*"] and self.state == "latest":
goal.add_rpm_upgrade(settings)
elif self.state in {"installed", "present", "latest"}:
upgrade = self.state == "latest"
for spec in self.names:
if is_newer_version_installed(base, spec):
if self.allow_downgrade:
goal.add_install(spec, settings)
elif is_installed(base, spec):
if upgrade:
goal.add_upgrade(spec, settings)
else:
if self.update_only:
results.append("Packages providing {} not installed due to update_only specified".format(spec))
else:
goal.add_install(spec, settings)
elif self.state in {"absent", "removed"}:
for spec in self.names:
try:
goal.add_remove(spec, settings)
except RuntimeError as e:
self.module.fail_json(msg=str(e), failures=[], rc=1)
if self.autoremove:
for pkg in get_unneeded_pkgs(base):
goal.add_rpm_remove(pkg, settings)
goal.set_allow_erasing(self.allowerasing)
try:
transaction = goal.resolve()
except RuntimeError as e:
self.module.fail_json(msg=str(e), failures=[], rc=1)
if transaction.get_problems():
failures = []
for log_event in transaction.get_resolve_logs():
if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"installed", "present", "latest"}:
# NOTE dnf module compat
failures.append("No package {} available.".format(log_event.get_spec()))
else:
failures.append(log_event.to_string())
if transaction.get_problems() & libdnf5.base.GoalProblem_SOLVER_ERROR != 0:
msg = "Depsolve Error occurred"
else:
msg = "Failed to install some of the specified packages"
self.module.fail_json(
msg=msg,
failures=failures,
rc=1,
)
# NOTE dnf module compat
actions_compat_map = {
"Install": "Installed",
"Remove": "Removed",
"Replace": "Installed",
"Upgrade": "Installed",
"Replaced": "Removed",
}
changed = bool(transaction.get_transaction_packages())
for pkg in transaction.get_transaction_packages():
if self.download_only:
action = "Downloaded"
else:
action = libdnf5.base.transaction.transaction_item_action_to_string(pkg.get_action())
results.append("{}: {}".format(actions_compat_map.get(action, action), pkg.get_package().get_nevra()))
msg = ""
if self.module.check_mode:
if results:
msg = "Check mode: No changes made, but would have if not in check mode"
else:
transaction.download()
if not self.download_only:
transaction.set_description("ansible dnf5 module")
result = transaction.run()
if result == libdnf5.base.Transaction.TransactionRunResult_ERROR_GPG_CHECK:
self.module.fail_json(
msg="Failed to validate GPG signatures: {}".format(",".join(transaction.get_gpg_signature_problems())),
failures=[],
rc=1,
)
elif result != libdnf5.base.Transaction.TransactionRunResult_SUCCESS:
self.module.fail_json(
msg="Failed to install some of the specified packages",
failures=["{}: {}".format(transaction.transaction_result_to_string(result), log) for log in transaction.get_transaction_problems()],
rc=1,
)
if not msg and not results:
msg = "Nothing to do"
self.module.exit_json(
results=results,
changed=changed,
msg=msg,
rc=0,
)
def main():
Dnf5Module(AnsibleModule(**yumdnf_argument_spec)).run()
if __name__ == "__main__":
main()
[file boundary: ansible_ansible/lib/ansible/modules/git.py (repo ansible/ansible, license GPL-3.0)]
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
- Manage I(git) checkouts of repositories to deploy files or software.
extends_documentation_fragment: action_common_attributes
options:
repo:
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
type: str
required: true
aliases: [ name ]
dest:
description:
- The path of where the repository should be checked out. This
is equivalent to C(git clone [repo_url] [directory]). The repository
named in O(repo) is not appended to this path and the destination directory must be empty. This
parameter is required, unless O(clone) is set to V(false).
type: path
required: true
version:
description:
      - What version of the repository to check out. This can be
        the literal string V(HEAD), a branch name, or a tag name.
It can also be a I(SHA-1) hash, in which case O(refspec) needs
to be specified if the given revision is not already available.
type: str
default: "HEAD"
accept_hostkey:
description:
      - If V(true), ensure that C(-o StrictHostKeyChecking=no) is present as an ssh option.
- Be aware that this disables a protection against MITM attacks.
      - Those using OpenSSH >= 7.5 might want to use O(accept_newhostkey) or set O(ssh_opts) to V(StrictHostKeyChecking=accept-new)
        instead; it does not remove the MITM issue but it does restrict it to the first attempt.
type: bool
default: 'no'
version_added: "1.5"
accept_newhostkey:
description:
- As of OpenSSH 7.5, C(-o StrictHostKeyChecking=accept-new) can be
        used, which is safer and will only accept host keys that are
        not yet present or are the same. If V(true), ensure that
C(-o StrictHostKeyChecking=accept-new) is present as an ssh option.
type: bool
default: 'no'
version_added: "2.12"
ssh_opts:
description:
      - Options git will pass to ssh when used as protocol; it works via C(git)'s
E(GIT_SSH)/E(GIT_SSH_COMMAND) environment variables.
- For older versions it appends E(GIT_SSH_OPTS) (specific to this module) to the
variables above or via a wrapper script.
- Other options can add to this list, like O(key_file) and O(accept_hostkey).
- An example value could be C(-o StrictHostKeyChecking=no) (although this particular
option is better set by O(accept_hostkey)).
- The module ensures that C(BatchMode=yes) is always present to avoid prompts.
type: str
version_added: "1.5"
key_file:
description:
- Specify an optional private key file path, on the target host, to use for the checkout.
- This ensures C(IdentitiesOnly=yes) is present in O(ssh_opts).
type: path
version_added: "1.5"
reference:
description:
- Reference repository (see C(git clone --reference ...)).
type: str
version_added: "1.4"
remote:
description:
- Name of the remote.
type: str
default: "origin"
refspec:
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the C(git fetch) command.
An example value could be "refs/meta/config".
type: str
version_added: "1.9"
force:
description:
- If V(true), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
V(true) and could not be disabled. Prior to 1.9, the default was
V(true).
type: bool
default: 'no'
version_added: "0.7"
depth:
description:
      - Create a shallow clone with a history truncated to the specified
        number of revisions. The minimum possible value is V(1), otherwise
ignored. Needs I(git>=1.9.1) to work correctly.
type: int
version_added: "1.2"
clone:
description:
- If V(false), do not clone the repository even if it does not exist locally.
type: bool
default: 'yes'
version_added: "1.9"
update:
description:
- If V(false), do not retrieve new revisions from the origin repository.
- Operations like archive will work on the existing (old) repository and might
not respond to changes to the options version or remote.
type: bool
default: 'yes'
version_added: "1.2"
executable:
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
type: path
version_added: "1.4"
bare:
description:
- If V(true), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
type: bool
default: 'no'
version_added: "1.4"
umask:
description:
- The umask to set before doing any checkouts, or any other
repository maintenance.
type: raw
version_added: "2.2"
recursive:
description:
- If V(false), repository will be cloned without the C(--recursive)
option, skipping sub-modules.
type: bool
default: 'yes'
version_added: "1.6"
single_branch:
description:
- Clone only the history leading to the tip of the specified revision.
type: bool
default: 'no'
version_added: '2.11'
track_submodules:
description:
- If V(true), submodules will track the latest commit on their
master branch (or other branch specified in C(.gitmodules)). If
V(false), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the C(--remote) flag
to git submodule update.
type: bool
default: 'no'
version_added: "1.8"
verify_commit:
description:
- If V(true), when cloning or checking out a O(version) verify the
signature of a GPG signed commit. This requires git version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be present in the GPG keyring.
type: bool
default: 'no'
version_added: "2.0"
archive:
description:
- Specify archive file path with extension. If specified, creates an
archive file of the specified format containing the tree structure
for the source tree.
Allowed archive formats ["zip", "tar.gz", "tar", "tgz"].
- This will clone and perform git archive from local directory as not
all git servers support git archive.
type: path
version_added: "2.4"
archive_prefix:
description:
- Specify a prefix to add to each file path in archive. Requires O(archive) to be specified.
version_added: "2.10"
type: str
separate_git_dir:
description:
- The path to place the cloned repository. If specified, Git repository
can be separated from working tree.
type: path
version_added: "2.7"
gpg_allowlist:
description:
- A list of trusted GPG fingerprints to compare to the fingerprint of the
GPG-signed commit.
- Only used when O(verify_commit=yes).
- Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag).
- Alias O(gpg_allowlist) is added in version 2.17.
- Alias O(gpg_whitelist) is deprecated and will be removed in version 2.21.
type: list
elements: str
default: []
aliases: [ gpg_whitelist ]
version_added: "2.9"
requirements:
- git>=1.7.1 (the command line tool)
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: C(ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts)."
"""
EXAMPLES = """
- name: Git checkout
ansible.builtin.git:
repo: 'https://github.com/ansible/ansible.git'
dest: /tmp/checkout
version: release-0.22
- name: Read-write git checkout from github
ansible.builtin.git:
repo: git@github.com:ansible/ansible.git
dest: /tmp/checkout
- name: Just ensuring the repo checkout exists
ansible.builtin.git:
repo: 'https://github.com/ansible/ansible.git'
dest: /tmp/checkout
update: no
- name: Just get information about the repository, whether or not it has already been cloned locally
ansible.builtin.git:
repo: git@github.com:ansible/ansible.git
dest: /tmp/checkout
clone: no
update: no
- name: Checkout a github repo and use refspec to fetch all pull requests
ansible.builtin.git:
repo: 'https://github.com/ansible/ansible.git'
dest: /tmp/checkout
refspec: '+refs/pull/*:refs/heads/*'
- name: Create git archive from repo
ansible.builtin.git:
repo: git@github.com:ansible/ansible.git
dest: /tmp/checkout
archive: /tmp/ansible.zip
- name: Clone a repo with separate git directory
ansible.builtin.git:
repo: 'https://github.com/ansible/ansible.git'
dest: /tmp/checkout
separate_git_dir: /tmp/repo
- name: Example clone of a single branch
ansible.builtin.git:
repo: git@github.com:ansible/ansible.git
dest: /tmp/checkout
single_branch: yes
version: master
- name: Avoid hanging when http(s) password is missing
ansible.builtin.git:
repo: 'https://github.com/ansible/ansible.git'
dest: /tmp/checkout
environment:
GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
# or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
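# A sketch using the verify_commit and gpg_allowlist options documented above;
# the fingerprint value is a hypothetical placeholder.
- name: Checkout only when the commit carries a GPG signature from a trusted key
  ansible.builtin.git:
    repo: 'https://github.com/ansible/ansible.git'
    dest: /tmp/checkout
    verify_commit: yes
    gpg_allowlist:
      - 0123456789ABCDEF0123456789ABCDEF01234567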
"""
RETURN = """
after:
description: Last commit revision of the repository retrieved during the update.
returned: success
type: str
sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
before:
description: Commit revision before the repository was updated, "null" for new repository.
returned: success
type: str
sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
remote_url_changed:
  description: Whether or not the remote URL was changed.
returned: success
type: bool
sample: True
warnings:
description: List of warnings if requested features were not available due to a too old git version.
returned: error
type: str
sample: git version is too old to fully support the depth argument. Falling back to full checkouts.
git_dir_now:
description: Contains the new path of .git directory if it is changed.
returned: success
type: str
sample: /path/to/new/git/dir
git_dir_before:
description: Contains the original path of .git directory if it is changed.
returned: success
type: str
sample: /path/to/old/git/dir
"""
import filecmp
import os
import re
import shlex
import stat
import sys
import shutil
import tempfile
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.six import b, string_types
def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir):
if os.path.exists(repo_dir):
module.fail_json(msg='Separate-git-dir path %s already exists.' % repo_dir)
if worktree_dir:
dot_git_file_path = os.path.join(worktree_dir, '.git')
try:
shutil.move(old_repo_dir, repo_dir)
with open(dot_git_file_path, 'w') as dot_git_file:
dot_git_file.write('gitdir: %s' % repo_dir)
result['git_dir_before'] = old_repo_dir
result['git_dir_now'] = repo_dir
except (IOError, OSError) as err:
# if we already moved the .git dir, roll it back
if os.path.exists(repo_dir):
shutil.move(repo_dir, old_repo_dir)
module.fail_json(msg=u'Unable to move git dir. %s' % to_text(err))
def head_splitter(headfile, remote, module=None, fail_on_error=False):
"""Extract the head reference"""
# https://github.com/ansible/ansible-modules-core/pull/907
res = None
if os.path.exists(headfile):
rawdata = None
try:
with open(headfile, 'r') as f:
rawdata = f.readline()
except Exception:
if fail_on_error and module:
module.fail_json(msg="Unable to read %s" % headfile)
if rawdata:
try:
rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
refparts = rawdata.split(' ')
newref = refparts[-1]
nrefparts = newref.split('/', 2)
res = nrefparts[-1].rstrip('\n')
except Exception:
if fail_on_error and module:
module.fail_json(msg="Unable to split head from '%s'" % rawdata)
return res
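# Illustrative trace: a HEAD file containing "ref: refs/heads/main\n" is split on
# spaces, the last token "refs/heads/main\n" is split on "/" at most twice, and
# the final component yields "main" once the trailing newline is stripped.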
def unfrackgitpath(path):
if path is None:
return None
# copied from ansible.utils.path
return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path))))
def get_submodule_update_params(module, git_path, cwd):
# or: git submodule [--quiet] update [--init] [-N|--no-fetch]
# [-f|--force] [--rebase] [--reference <repository>] [--merge]
# [--recursive] [--] [<path>...]
params = []
    # parse the help output of `git submodule update` to discover the valid params
cmd = "%s submodule update --help" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
lines = stderr.split('\n')
update_line = None
for line in lines:
if 'git submodule [--quiet] update ' in line:
update_line = line
if update_line:
update_line = update_line.replace('[', '')
update_line = update_line.replace(']', '')
update_line = update_line.replace('|', ' ')
parts = shlex.split(update_line)
for part in parts:
if part.startswith('--'):
part = part.replace('--', '')
params.append(part)
return params
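# Assuming the usage line quoted in the comment above, the scraped params are the
# long options with their leading dashes stripped, e.g. 'quiet', 'init',
# 'no-fetch', 'force', 'rebase', 'reference', 'merge' and 'recursive'.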
def write_ssh_wrapper(module):
"""
    This writes a shell wrapper for ssh options to be used with git.
    This is only relevant for older versions of git that cannot
    handle the options themselves. Returns the path to the script.
"""
try:
# make sure we have full permission to the module_dir, which
# may not be the case if we're sudo'ing to a non-root user
if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):
fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')
else:
raise OSError
except (IOError, OSError):
fd, wrapper_path = tempfile.mkstemp()
# use existing git_ssh/ssh_command, fallback to 'ssh'
template = b("""#!/bin/sh
%s $GIT_SSH_OPTS "$@"
""" % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh')))
# write it
with os.fdopen(fd, 'w+b') as fh:
fh.write(template)
# set execute
st = os.stat(wrapper_path)
os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template))
# ensure we cleanup after ourselves
module.add_cleanup_file(path=wrapper_path)
return wrapper_path
def set_git_ssh_env(key_file, ssh_opts, git_version, module):
"""
    Use environment variables to configure git's ssh execution; the exact
    mechanism varies by git version, but this function should handle all of them.
"""
# initialise to existing ssh opts and/or append user provided
if ssh_opts is None:
ssh_opts = os.environ.get('GIT_SSH_OPTS', '')
else:
ssh_opts = os.environ.get('GIT_SSH_OPTS', '') + ' ' + ssh_opts
# hostkey acceptance
accept_key = "StrictHostKeyChecking=no"
if module.params['accept_hostkey'] and accept_key not in ssh_opts:
ssh_opts += " -o %s" % accept_key
# avoid prompts
force_batch = 'BatchMode=yes'
if force_batch not in ssh_opts:
ssh_opts += ' -o %s' % (force_batch)
# deal with key file
if key_file:
key_opt = '-i %s' % key_file
if key_opt not in ssh_opts:
ssh_opts += ' %s' % key_opt
ikey = 'IdentitiesOnly=yes'
if ikey not in ssh_opts:
ssh_opts += ' -o %s' % ikey
    # git older than 2.3 does not know how to use GIT_SSH_COMMAND,
    # so we force the options into the GIT_SSH wrapper
# https://github.com/gitster/git/commit/09d60d785c68c8fa65094ecbe46fbc2a38d0fc1f
if git_version is not None and git_version < LooseVersion('2.3.0'):
# for use in wrapper
os.environ["GIT_SSH_OPTS"] = ssh_opts
# these versions don't support GIT_SSH_OPTS so have to write wrapper
wrapper = write_ssh_wrapper(module)
        # force use of git_ssh_opts via wrapper, git_ssh cannot handle arguments
os.environ['GIT_SSH'] = wrapper
else:
# we construct full finalized command string here
full_cmd = os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh'))
if ssh_opts:
full_cmd += ' ' + ssh_opts
# git_ssh_command can handle arguments to ssh
os.environ["GIT_SSH_COMMAND"] = full_cmd
def get_version(module, git_path, dest, ref="HEAD"):
""" samples the version of the git repo """
cmd = "%s rev-parse %s" % (git_path, ref)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
sha = to_native(stdout).rstrip('\n')
return sha
def ssh_supports_acceptnewhostkey(module):
try:
ssh_path = get_bin_path('ssh')
except ValueError as err:
module.fail_json(
msg='Remote host is missing ssh command, so you cannot '
'use acceptnewhostkey option.', details=to_text(err))
supports_acceptnewhostkey = True
cmd = [ssh_path, '-o', 'StrictHostKeyChecking=accept-new', '-V']
rc, stdout, stderr = module.run_command(cmd)
if rc != 0:
supports_acceptnewhostkey = False
return supports_acceptnewhostkey
def get_submodule_versions(git_path, module, dest, version='HEAD'):
cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(
msg='Unable to determine hashes of submodules',
stdout=out,
stderr=err,
rc=rc)
submodules = {}
subm_name = None
for line in out.splitlines():
if line.startswith("Entering '"):
subm_name = line[10:-1]
elif len(line.strip()) == 40:
if subm_name is None:
module.fail_json()
submodules[subm_name] = line.strip()
subm_name = None
else:
module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
if subm_name is not None:
module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
return submodules
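# Illustrative `git submodule foreach` output parsed above:
#   Entering 'lib/somesub'
#   0123456789abcdef0123456789abcdef01234567
# yields {'lib/somesub': '0123456789abcdef0123456789abcdef01234567'}.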
def clone(git_path, module, repo, dest, remote, depth, version, bare,
reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_allowlist, single_branch):
""" makes a new git repo if it does not already exist """
dest_dirname = os.path.dirname(dest)
try:
os.makedirs(dest_dirname)
except Exception:
pass
cmd = [git_path, 'clone']
if bare:
cmd.append('--bare')
else:
cmd.extend(['--origin', remote])
is_branch_or_tag = is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)
if depth:
if version == 'HEAD' or refspec:
cmd.extend(['--depth', str(depth)])
elif is_branch_or_tag:
cmd.extend(['--depth', str(depth)])
cmd.extend(['--branch', version])
else:
# only use depth if the remote object is branch or tag (i.e. fetchable)
module.warn("Ignoring depth argument. "
"Shallow clones are only available for "
"HEAD, branches, tags or in combination with refspec.")
if reference:
cmd.extend(['--reference', str(reference)])
if single_branch:
if git_version_used is None:
module.fail_json(msg='Cannot find git executable at %s' % git_path)
if git_version_used < LooseVersion('1.7.10'):
module.warn("git version '%s' is too old to use 'single-branch'. Ignoring." % git_version_used)
else:
cmd.append("--single-branch")
if is_branch_or_tag:
cmd.extend(['--branch', version])
needs_separate_git_dir_fallback = False
if separate_git_dir:
if git_version_used is None:
module.fail_json(msg='Cannot find git executable at %s' % git_path)
if git_version_used < LooseVersion('1.7.5'):
# git before 1.7.5 doesn't have separate-git-dir argument, do fallback
needs_separate_git_dir_fallback = True
else:
cmd.append('--separate-git-dir=%s' % separate_git_dir)
cmd.extend([repo, dest])
module.run_command(cmd, check_rc=True, cwd=dest_dirname)
if needs_separate_git_dir_fallback:
relocate_repo(module, result, separate_git_dir, os.path.join(dest, ".git"), dest)
if bare and remote != 'origin':
module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
if refspec:
cmd = [git_path, 'fetch']
if depth:
cmd.extend(['--depth', str(depth)])
cmd.extend([remote, refspec])
module.run_command(cmd, check_rc=True, cwd=dest)
if verify_commit:
verify_commit_sign(git_path, module, dest, version, gpg_allowlist)
def has_local_mods(module, git_path, dest, bare):
if bare:
return False
cmd = "%s status --porcelain" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
lines = stdout.splitlines()
lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
return len(lines) > 0
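# Illustrative `git status --porcelain` output: " M config.yml" plus an untracked
# "?? scratch.txt" line counts as one local modification, since "??" entries are
# filtered out above.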
def reset(git_path, module, dest):
"""
Resets the index and working tree to HEAD.
Discards any changes to tracked files in working
tree since that commit.
"""
cmd = "%s reset --hard HEAD" % (git_path,)
return module.run_command(cmd, check_rc=True, cwd=dest)
def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
""" Return the difference between 2 versions """
if before is None:
return {'prepared': '>> Newly checked out %s' % after}
elif before != after:
# Ensure we have the object we are referring to during git diff !
git_version_used = git_version(git_path, module)
fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
cmd = '%s diff %s %s' % (git_path, before, after)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc == 0 and out:
return {'prepared': out}
elif rc == 0:
return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
elif err:
return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
else:
return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}
return {}
def get_remote_head(git_path, module, dest, version, remote, bare):
cloning = False
cwd = None
tag = False
if remote == module.params['repo']:
cloning = True
elif remote == 'file://' + os.path.expanduser(module.params['repo']):
cloning = True
else:
cwd = dest
if version == 'HEAD':
if cloning:
# cloning the repo, just get the remote's HEAD version
cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
else:
head_branch = get_head_branch(git_path, module, dest, remote, bare)
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
elif is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
elif is_remote_tag(git_path, module, dest, remote, version):
tag = True
cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
else:
# appears to be a sha1. return as-is since it appears
# cannot check for a specific sha1 on remote
return version
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
if len(out) < 1:
module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
out = to_native(out)
if tag:
# Find the dereferenced tag if this is an annotated tag.
for tag in out.split('\n'):
if tag.endswith(version + '^{}'):
out = tag
break
elif tag.endswith(version):
out = tag
rev = out.split()[0]
return rev
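# Illustrative annotated-tag handling above: `ls-remote -t` may print both
#   <sha-of-tag-object>  refs/tags/v1.0
#   <sha-of-commit>      refs/tags/v1.0^{}
# and the loop prefers the dereferenced "^{}" line, so the commit sha is returned.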
def is_remote_tag(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def get_branches(git_path, module, dest):
branches = []
cmd = '%s branch --no-color -a' % (git_path,)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
for line in out.split('\n'):
if line.strip():
branches.append(line.strip())
return branches
def get_annotated_tags(git_path, module, dest):
tags = []
cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
for line in to_native(out).split('\n'):
if line.strip():
tagtype, tagname = line.strip().split(':')
if tagtype == 'tag':
tags.append(tagname)
return tags
def is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def is_local_branch(git_path, module, dest, branch):
branches = get_branches(git_path, module, dest)
lbranch = '%s' % branch
if lbranch in branches:
return True
elif '* %s' % branch in branches:
return True
else:
return False
def is_not_a_branch(git_path, module, dest):
branches = get_branches(git_path, module, dest)
for branch in branches:
if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch or 'detached at' in branch):
return True
return False
def get_repo_path(dest, bare):
if bare:
repo_path = dest
else:
repo_path = os.path.join(dest, '.git')
    # Check if .git is a file. If it is, the repository's git directory is in an
    # external location relative to the working copy (e.g. we are in a submodule structure).
if os.path.isfile(repo_path):
with open(repo_path, 'r') as gitfile:
data = gitfile.read()
ref_prefix, gitdir = data.rstrip().split('gitdir: ', 1)
if ref_prefix:
raise ValueError('.git file has invalid git dir reference format')
        # The .git file may contain an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
# Use original destination directory with data from .git file.
repo_path = os.path.join(dest, gitdir)
if not os.path.isdir(repo_path):
raise ValueError('%s is not a directory' % repo_path)
return repo_path
def get_head_branch(git_path, module, dest, remote, bare=False):
"""
Determine what branch HEAD is associated with. This is partly
taken from lib/ansible/utils/__init__.py. It finds the correct
path to .git/HEAD and reads from that file the branch that HEAD is
associated with. In the case of a detached HEAD, this will look
up the branch in .git/refs/remotes/<remote>/HEAD.
"""
try:
repo_path = get_repo_path(dest, bare)
except (IOError, ValueError) as err:
# No repo path found
# ``.git`` file does not have a valid format for detached Git dir.
module.fail_json(
msg='Current repo does not have a valid reference to a '
            'separate Git dir or it refers to an invalid path',
details=to_text(err),
)
# Read .git/HEAD for the name of the branch.
# If we're in a detached HEAD state, look up the branch associated with
# the remote HEAD in .git/refs/remotes/<remote>/HEAD
headfile = os.path.join(repo_path, "HEAD")
if is_not_a_branch(git_path, module, dest):
headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
return branch
def get_remote_url(git_path, module, dest, remote):
"""Return URL of remote source for repo."""
command = [git_path, 'ls-remote', '--get-url', remote]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
# There was an issue getting remote URL, most likely
# command is not available in this version of Git.
return None
return to_native(out).rstrip('\n')
def set_remote_url(git_path, module, repo, dest, remote):
""" updates repo from remote sources """
# Return if remote URL isn't changing.
remote_url = get_remote_url(git_path, module, dest, remote)
if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
return False
command = [git_path, 'remote', 'set-url', remote, repo]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
label = "set a new url %s for %s" % (repo, remote)
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
# Return False if remote_url is None to maintain previous behavior
# for Git versions prior to 1.7.5 that lack required functionality.
return remote_url is not None
def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=False):
""" updates repo from remote sources """
set_remote_url(git_path, module, repo, dest, remote)
commands = []
fetch_str = 'download remote objects and refs'
fetch_cmd = [git_path, 'fetch']
refspecs = []
if depth:
# try to find the minimal set of refs we need to fetch to get a
# successful checkout
currenthead = get_head_branch(git_path, module, dest, remote)
if refspec:
refspecs.append(refspec)
elif version == 'HEAD':
refspecs.append(currenthead)
elif is_remote_branch(git_path, module, dest, repo, version):
if currenthead != version:
# this workaround is only needed for older git versions
# 1.8.3 is broken, 1.9.x works
# ensure that remote branch is available as both local and remote ref
refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
elif is_remote_tag(git_path, module, dest, repo, version):
refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
if refspecs:
# if refspecs is empty, i.e. version is neither heads nor tags
# assume it is a version hash
# fall back to a full clone, otherwise we might not be able to checkout
# version
fetch_cmd.extend(['--depth', str(depth)])
if not depth or not refspecs:
# don't try to be minimalistic but do a full clone
# also do this if depth is given, but version is something that can't be fetched directly
if bare:
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
else:
# ensure all tags are fetched
if git_version_used is not None and git_version_used >= LooseVersion('1.9'):
fetch_cmd.append('--tags')
else:
# old git versions have a bug in --tags that prevents updating existing tags
commands.append((fetch_str, fetch_cmd + [remote]))
refspecs = ['+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
if force:
fetch_cmd.append('--force')
fetch_cmd.extend([remote])
commands.append((fetch_str, fetch_cmd + refspecs))
for (label, command) in commands:
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
def submodules_fetch(git_path, module, remote, track_submodules, dest):
changed = False
if not os.path.exists(os.path.join(dest, '.gitmodules')):
# no submodules
return changed
gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
for line in gitmodules_file:
# Check for new submodules
if not changed and line.strip().startswith('path'):
path = line.split('=', 1)[1].strip()
# Check that dest/path/.git exists
if not os.path.exists(os.path.join(dest, path, '.git')):
changed = True
# Check for updates to existing modules
if not changed:
# Fetch updates
begin = get_submodule_versions(git_path, module, dest)
cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
if track_submodules:
# Compare against submodule HEAD
# FIXME: determine this from .gitmodules
version = 'master'
after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
if begin != after:
changed = True
else:
# Compare against the superproject's expectation
cmd = [git_path, 'submodule', 'status']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
for line in out.splitlines():
if line[0] != ' ':
changed = True
break
return changed
def submodule_update(git_path, module, dest, track_submodules, force=False):
""" init and update any submodules """
# get the valid submodule params
params = get_submodule_update_params(module, git_path, dest)
# skip submodule commands if .gitmodules is not present
if not os.path.exists(os.path.join(dest, '.gitmodules')):
return (0, '', '')
cmd = [git_path, 'submodule', 'sync']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if 'remote' in params and track_submodules:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive', '--remote']
else:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
if force:
cmd.append('--force')
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
return (rc, out, err)
def set_remote_branch(git_path, module, dest, remote, version, depth):
"""set refs for the remote branch version
This assumes the branch does not yet exist locally and is therefore also not checked out.
Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
"""
branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_allowlist):
cmd = ''
if version == 'HEAD':
branch = get_head_branch(git_path, module, dest, remote)
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % branch,
stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
else:
# FIXME check for local_branch first, should have been fetched already
if is_remote_branch(git_path, module, dest, remote, version):
if depth and not is_local_branch(git_path, module, dest, version):
# git clone --depth implies --single-branch, which makes
# the checkout fail if the version changes
# fetch the remote branch, to be able to check it out next
set_remote_branch(git_path, module, dest, remote, version, depth)
if not is_local_branch(git_path, module, dest, version):
cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
else:
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
else:
cmd = "%s checkout --force %s" % (git_path, version)
(rc, out1, err1) = module.run_command(cmd, cwd=dest)
if rc != 0:
if version != 'HEAD':
module.fail_json(msg="Failed to checkout %s" % (version),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
else:
module.fail_json(msg="Failed to checkout branch %s" % (branch),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
if verify_commit:
verify_commit_sign(git_path, module, dest, version, gpg_allowlist)
return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version, gpg_allowlist):
if version in get_annotated_tags(git_path, module, dest):
git_sub = "verify-tag"
else:
git_sub = "verify-commit"
cmd = "%s %s %s" % (git_path, git_sub, version)
if gpg_allowlist:
cmd += " --raw"
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
if gpg_allowlist:
fingerprint = get_gpg_fingerprint(err)
if fingerprint not in gpg_allowlist:
module.fail_json(msg='The gpg_allowlist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
return (rc, out, err)
def get_gpg_fingerprint(output):
"""Return a fingerprint of the primary key.
Ref:
https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;hb=HEAD#l482
"""
for line in output.splitlines():
data = line.split()
if data[1] != 'VALIDSIG':
continue
# if signed with a subkey, this contains the primary key fingerprint
data_id = 11 if len(data) == 11 else 2
return data[data_id]
def git_version(git_path, module):
"""return the installed version of git"""
cmd = "%s --version" % git_path
(rc, out, err) = module.run_command(cmd)
if rc != 0:
# one could fail_json here, but the version info is not that important,
# so let's try to fail only on actual git commands
return None
rematch = re.search('git version (.*)$', to_native(out))
if not rematch:
return None
return LooseVersion(rematch.groups()[0])
def git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version):
""" Create git archive in given source directory """
cmd = [git_path, 'archive', '--format', archive_fmt, '--output', archive, version]
if archive_prefix is not None:
cmd.insert(-1, '--prefix')
cmd.insert(-1, archive_prefix)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to perform archive operation",
details="Git archive command failed to create "
"archive %s using %s directory."
"Error: %s" % (archive, dest, err))
return rc, out, err
def create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result):
""" Helper function for creating archive using git_archive """
all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
'.tgz': 'tgz'}
dummy, archive_ext = os.path.splitext(archive)
archive_fmt = all_archive_fmt.get(archive_ext, None)
if archive_fmt is None:
module.fail_json(msg="Unable to get file extension from "
"archive file name : %s" % archive,
details="Please specify archive as filename with "
"extension. File extension can be one "
"of ['tar', 'tar.gz', 'zip', 'tgz']")
repo_name = repo.split("/")[-1].replace(".git", "")
if os.path.exists(archive):
# If git archive file exists, then compare it with new git archive file.
# if match, do nothing
# if does not match, then replace existing with temp archive file.
tempdir = tempfile.mkdtemp()
new_archive_dest = os.path.join(tempdir, repo_name)
new_archive = new_archive_dest + '.' + archive_fmt
git_archive(git_path, module, dest, new_archive, archive_fmt, archive_prefix, version)
        # filecmp is supposed to be more efficient than an md5sum checksum
if filecmp.cmp(new_archive, archive):
result.update(changed=False)
# Cleanup before exiting
try:
shutil.rmtree(tempdir)
except OSError:
pass
else:
try:
shutil.move(new_archive, archive)
shutil.rmtree(tempdir)
result.update(changed=True)
except OSError as e:
module.fail_json(msg="Failed to move %s to %s" %
(new_archive, archive),
details=u"Error occurred while moving : %s"
% to_text(e))
else:
# Perform archive from local directory
git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version)
result.update(changed=True)
# ===========================================
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(type='path'),
repo=dict(required=True, aliases=['name']),
version=dict(default='HEAD'),
remote=dict(default='origin'),
refspec=dict(default=None),
reference=dict(default=None),
force=dict(default='no', type='bool'),
depth=dict(default=None, type='int'),
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
verify_commit=dict(default='no', type='bool'),
gpg_allowlist=dict(
default=[], type='list', aliases=['gpg_whitelist'], elements='str',
deprecated_aliases=[
dict(
name='gpg_whitelist',
version='2.21',
collection_name='ansible.builtin',
)
],
),
accept_hostkey=dict(default='no', type='bool'),
accept_newhostkey=dict(default='no', type='bool'),
key_file=dict(default=None, type='path', required=False),
ssh_opts=dict(default=None, required=False),
executable=dict(default=None, type='path'),
bare=dict(default='no', type='bool'),
recursive=dict(default='yes', type='bool'),
single_branch=dict(default=False, type='bool'),
track_submodules=dict(default='no', type='bool'),
umask=dict(default=None, type='raw'),
archive=dict(type='path'),
archive_prefix=dict(),
separate_git_dir=dict(type='path'),
),
mutually_exclusive=[('separate_git_dir', 'bare'), ('accept_hostkey', 'accept_newhostkey')],
required_by={'archive_prefix': ['archive']},
supports_check_mode=True
)
dest = module.params['dest']
repo = module.params['repo']
version = module.params['version']
remote = module.params['remote']
refspec = module.params['refspec']
force = module.params['force']
depth = module.params['depth']
update = module.params['update']
allow_clone = module.params['clone']
bare = module.params['bare']
verify_commit = module.params['verify_commit']
gpg_allowlist = module.params['gpg_allowlist']
reference = module.params['reference']
single_branch = module.params['single_branch']
git_path = module.params['executable'] or module.get_bin_path('git', True)
key_file = module.params['key_file']
ssh_opts = module.params['ssh_opts']
umask = module.params['umask']
archive = module.params['archive']
archive_prefix = module.params['archive_prefix']
separate_git_dir = module.params['separate_git_dir']
result = dict(changed=False, warnings=list())
if module.params['accept_hostkey']:
if ssh_opts is not None:
if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
ssh_opts += " -o StrictHostKeyChecking=no"
else:
ssh_opts = "-o StrictHostKeyChecking=no"
if module.params['accept_newhostkey']:
if not ssh_supports_acceptnewhostkey(module):
module.warn("Your ssh client does not support accept_newhostkey option, therefore it cannot be used.")
else:
if ssh_opts is not None:
if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
ssh_opts += " -o StrictHostKeyChecking=accept-new"
else:
ssh_opts = "-o StrictHostKeyChecking=accept-new"
# evaluate and set the umask before doing anything else
if umask is not None:
if not isinstance(umask, string_types):
module.fail_json(msg="umask must be defined as a quoted octal integer")
try:
umask = int(umask, 8)
except Exception:
module.fail_json(msg="umask must be an octal integer",
details=to_text(sys.exc_info()[1]))
os.umask(umask)
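    # Illustrative: umask: "022" arrives as the string '022' and becomes
    # int('022', 8) == 18 (0o022), clearing the write bits for group and other.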
# Certain features such as depth require a file:/// protocol for path based urls
# so force a protocol here ...
if os.path.expanduser(repo).startswith('/'):
repo = 'file://' + os.path.expanduser(repo)
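    # Illustrative: a repo of '/srv/git/project.git' (hypothetical path) is
    # rewritten to 'file:///srv/git/project.git' above so that options such as
    # depth behave consistently for local repositories.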
# We screenscrape a huge amount of git commands so use C locale anytime we
# call run_command()
locale = get_best_parsable_locale(module)
module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale)
if separate_git_dir:
separate_git_dir = os.path.realpath(separate_git_dir)
gitconfig = None
if not dest and allow_clone:
module.fail_json(msg="the destination directory must be specified unless clone=no")
elif dest:
dest = os.path.abspath(dest)
try:
repo_path = get_repo_path(dest, bare)
if separate_git_dir and os.path.exists(repo_path) and separate_git_dir != repo_path:
result['changed'] = True
if not module.check_mode:
relocate_repo(module, result, separate_git_dir, repo_path, dest)
repo_path = separate_git_dir
except (IOError, ValueError) as err:
# No repo path found
# ``.git`` file does not have a valid format for detached Git dir.
module.fail_json(
            msg='Current repo does not have a valid reference to a '
                'separate Git dir or it refers to an invalid path',
details=to_text(err),
)
gitconfig = os.path.join(repo_path, 'config')
    # the git command-line interface changes between versions, so fetch the version to base decisions on
git_version_used = git_version(git_path, module)
    # Set GIT_SSH=<path> in the environment; for older git versions this may create an sh wrapper script.
set_git_ssh_env(key_file, ssh_opts, git_version_used, module)
if depth is not None and git_version_used is not None and git_version_used < LooseVersion('1.9.1'):
module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
depth = None
recursive = module.params['recursive']
track_submodules = module.params['track_submodules']
result.update(before=None)
local_mods = False
if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
# if there is no git configuration, do a clone operation unless:
# * the user requested no clone (they just want info)
# * we're doing a check mode test
# In those cases we do an ls-remote
if module.check_mode or not allow_clone:
remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
result.update(changed=True, after=remote_head)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference,
refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_allowlist, single_branch)
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
# requested.
result['before'] = get_version(module, git_path, dest)
result.update(after=result['before'])
if archive:
# Git archive is not supported by all git servers, so
# we will first clone and perform git archive from local directory
if module.check_mode:
result.update(changed=True)
module.exit_json(**result)
create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
module.exit_json(**result)
else:
# else do a pull
local_mods = has_local_mods(module, git_path, dest, bare)
result['before'] = get_version(module, git_path, dest)
if local_mods:
# failure should happen regardless of check mode
if not force:
module.fail_json(msg="Local modifications exist in the destination: " + dest + " (force=no).", **result)
# if force and in non-check mode, do a reset
if not module.check_mode:
reset(git_path, module, dest)
result.update(changed=True, msg='Local modifications exist in the destination: ' + dest)
# exit if already at desired sha version
if module.check_mode:
remote_url = get_remote_url(git_path, module, dest, remote)
remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
else:
remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
result.update(remote_url_changed=remote_url_changed)
if module.check_mode:
remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
# FIXME: This diff should fail since the new remote_head is not fetched yet?!
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
else:
fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=force)
result['after'] = get_version(module, git_path, dest)
# switch to version specified regardless of whether
# we got new revisions from the repository
if not bare:
switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_allowlist)
# Deal with submodules
submodules_updated = False
if recursive and not bare:
submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
if submodules_updated:
result.update(submodules_changed=submodules_updated)
if module.check_mode:
result.update(changed=True, after=remote_head)
module.exit_json(**result)
# Switch to version specified
submodule_update(git_path, module, dest, track_submodules, force=force)
# determine if we changed anything
result['after'] = get_version(module, git_path, dest)
if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
result.update(changed=True)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
if archive:
# Git archive is not supported by all git servers, so
# we will first clone and perform git archive from local directory
if module.check_mode:
result.update(changed=True)
module.exit_json(**result)
create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| 57,029 | Python | .py | 1,240 | 37.050806 | 154 | 0.618489 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,159 | debconf.py | ansible_ansible/lib/ansible/modules/debconf.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections.
- Or just query existing selections.
version_added: "1.6"
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
support: full
platforms: debian
notes:
- This module requires the command line debconf tools.
- Several questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to C(debconf-get-selections) masking passwords.
- It is highly recommended to add C(no_log=True) to the task while handling sensitive information using this module.
- The M(ansible.builtin.debconf) module does not reconfigure packages, it just updates the debconf database.
An additional step is needed (typically with C(notify) if debconf makes a change)
to reconfigure the package and apply the changes.
C(debconf) is extensively used for pre-seeding configuration prior to installation
rather than modifying configurations.
So, while C(dpkg-reconfigure) does use debconf data, it is not always authoritative
and you may need to check how your package is handled.
- Also note C(dpkg-reconfigure) is a 3-phase process. It invokes the
control scripts from the C(/var/lib/dpkg/info) directory with the
C(<package>.prerm reconfigure <version>),
C(<package>.config reconfigure <version>) and C(<package>.postinst control <version>) arguments.
- The main issue is that the C(<package>.config reconfigure) step for many packages
will first reset the debconf database (overriding changes made by this module) by
checking the on-disk configuration. If this is the case for your package then
C(dpkg-reconfigure) will effectively ignore changes made by debconf.
- However as C(dpkg-reconfigure) only executes the C(<package>.config) step if the file
exists, it is possible to rename it to C(/var/lib/dpkg/info/<package>.config.ignore)
before executing C(dpkg-reconfigure -f noninteractive <package>) and then restore it.
This seems to be compliant with Debian policy for the .config file.
requirements:
- debconf
- debconf-utils
options:
name:
description:
- Name of package to configure.
type: str
required: true
aliases: [ pkg ]
question:
description:
- A debconf configuration setting.
type: str
aliases: [ selection, setting ]
vtype:
description:
- The type of the value supplied.
- It is highly recommended to add C(no_log=True) to task while specifying O(vtype=password).
- V(seen) was added in Ansible 2.2.
      - After Ansible 2.17, a user can specify C(value) as a list if C(vtype) is set to V(multiselect).
type: str
choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
value:
description:
- Value to set the configuration to.
- After Ansible 2.17, C(value) is of type C(raw).
type: raw
aliases: [ answer ]
unseen:
description:
- Do not set C(seen) flag when pre-seeding.
type: bool
default: false
author:
- Brian Coca (@bcoca)
"""
EXAMPLES = r"""
- name: Set default locale to fr_FR.UTF-8
ansible.builtin.debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
- name: Set to generate locales
ansible.builtin.debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
- name: Accept oracle license
ansible.builtin.debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: 'true'
vtype: select
- name: Specifying package you can register/return the list of questions and current values
ansible.builtin.debconf:
name: tzdata
- name: Pre-configure tripwire site passphrase
ansible.builtin.debconf:
name: tripwire
question: tripwire/site-passphrase
value: "{{ site_passphrase }}"
vtype: password
no_log: True
"""
RETURN = r"""#"""
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.basic import AnsibleModule
def get_password_value(module, pkg, question, vtype):
getsel = module.get_bin_path('debconf-get-selections', True)
cmd = [getsel]
rc, out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(msg=f"Failed to get the value '{question}' from '{pkg}': {err}")
for line in out.split("\n"):
if not line.startswith(pkg):
continue
# line is a collection of tab separated values
fields = line.split('\t')
if len(fields) <= 3:
# No password found, return a blank password
return ''
try:
if fields[1] == question and fields[2] == vtype:
# If correct question and question type found, return password value
return fields[3]
        except IndexError:
            # Fail safe
            return ''
    # The question was not found for this package; treat it as a blank password
    return ''
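# Hypothetical debconf-get-selections line matched above, with tab-separated
# fields [package, question, vtype, value]:
#   mysql-server<TAB>mysql-server/root_password<TAB>password<TAB>s3cret
# in which case fields[3] ('s3cret') is returned.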
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
    rc, out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
selections[key.strip('*').strip()] = value.strip()
return selections
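# Hypothetical 'debconf-show tzdata' output parsed above:
#   * tzdata/Areas: Europe
#     tzdata/Zones/Europe: Amsterdam
# yields {'tzdata/Areas': 'Europe', 'tzdata/Zones/Europe': 'Amsterdam'};
# the leading '*' marking an already-seen question is stripped from the key.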
def set_selection(module, pkg, question, vtype, value, unseen):
setsel = module.get_bin_path('debconf-set-selections', True)
cmd = [setsel]
if unseen:
cmd.append('-u')
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
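# The data piped to debconf-set-selections is a single whitespace-separated
# line, for example (illustrative values):
#   locales locales/default_environment_locale select fr_FR.UTF-8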
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['pkg']),
question=dict(type='str', aliases=['selection', 'setting']),
vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
value=dict(type='raw', aliases=['answer']),
unseen=dict(type='bool', default=False),
),
required_together=(['question', 'vtype', 'value'],),
supports_check_mode=True,
)
# TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
# ensure we compare booleans supplied to the way debconf sees them (true/false strings)
if vtype == 'boolean':
value = to_text(value).lower()
# if question doesn't exist, value cannot match
if question not in prev:
changed = True
else:
existing = prev[question]
if vtype == 'boolean':
existing = to_text(prev[question]).lower()
elif vtype == 'password':
existing = get_password_value(module, pkg, question, vtype)
elif vtype == 'multiselect' and isinstance(value, list):
try:
value = sorted(value)
except TypeError as exc:
module.fail_json(msg="Invalid value provided for 'multiselect': %s" % to_native(exc))
existing = sorted([i.strip() for i in existing.split(",")])
if value != existing:
changed = True
if changed:
if not module.check_mode:
if vtype == 'multiselect' and isinstance(value, list):
try:
value = ", ".join(value)
except TypeError as exc:
module.fail_json(msg="Invalid value provided for 'multiselect': %s" % to_native(exc))
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = {question: value}
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
diff_dict = {}
if module._diff:
after = prev.copy()
after.update(curr)
diff_dict = {'before': prev, 'after': after}
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
module.exit_json(changed=changed, msg=msg, current=prev)
if __name__ == '__main__':
main()
| 9,362 | Python | .py | 224 | 34.696429 | 150 | 0.653914 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,160 | subversion.py | ansible_ansible/lib/ansible/modules/subversion.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: subversion
short_description: Deploys a subversion repository
description:
- Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.
version_added: "0.7"
author:
- Dane Summers (@dsummersl) <njharman@gmail.com>
options:
repo:
description:
- The subversion URL to the repository.
type: str
required: true
aliases: [ name, repository ]
dest:
description:
- Absolute path where the repository should be deployed.
- The destination directory must be specified unless O(checkout=no), O(update=no), and O(export=no).
type: path
revision:
description:
- Specific revision to checkout.
type: str
default: HEAD
aliases: [ rev, version ]
force:
description:
- If V(true), modified files will be discarded. If V(false), module will fail if it encounters modified files.
Prior to 1.9 the default was V(true).
type: bool
default: "no"
in_place:
description:
- If the directory exists, then the working copy will be checked-out over-the-top using
C(svn checkout --force); if force is specified then existing files with different content are reverted.
type: bool
default: "no"
version_added: "2.6"
username:
description:
- C(--username) parameter passed to svn.
type: str
password:
description:
- C(--password) parameter passed to svn when svn is less than version 1.10.0. This is not secure and
the password will be leaked to argv.
- C(--password-from-stdin) parameter when svn is greater or equal to version 1.10.0.
type: str
executable:
description:
- Path to svn executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
type: path
version_added: "1.4"
checkout:
description:
- If V(false), do not check out the repository if it does not exist locally.
type: bool
default: "yes"
version_added: "2.3"
update:
description:
- If V(false), do not retrieve new revisions from the origin repository.
type: bool
default: "yes"
version_added: "2.3"
export:
description:
- If V(true), do export instead of checkout/update.
type: bool
default: "no"
version_added: "1.6"
switch:
description:
- If V(false), do not call svn switch before update.
default: "yes"
version_added: "2.0"
type: bool
validate_certs:
description:
- If V(false), passes the C(--trust-server-cert) flag to svn.
- If V(true), does not pass the flag.
default: "no"
version_added: "2.11"
type: bool
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
notes:
- This module does not handle externals.
requirements:
- subversion (the command line tool with C(svn) entrypoint)
"""
EXAMPLES = """
- name: Checkout subversion repository to specified folder
ansible.builtin.subversion:
repo: svn+ssh://an.example.org/path/to/repo
dest: /src/checkout
- name: Export subversion directory to folder
ansible.builtin.subversion:
repo: svn+ssh://an.example.org/path/to/repo
dest: /src/export
export: yes
- name: Get information about the repository whether or not it has already been cloned locally
ansible.builtin.subversion:
repo: svn+ssh://an.example.org/path/to/repo
dest: /src/checkout
checkout: no
update: no
"""
RETURN = r"""#"""
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.compat.version import LooseVersion
class Subversion(object):
# Example text matched by the regexp:
# Révision : 1889134
# 版本: 1889134
# Revision: 1889134
REVISION_RE = r'^\w+\s?:\s+\d+$'
def __init__(self, module, dest, repo, revision, username, password, svn_path, validate_certs):
self.module = module
self.dest = dest
self.repo = repo
self.revision = revision
self.username = username
self.password = password
self.svn_path = svn_path
self.validate_certs = validate_certs
def has_option_password_from_stdin(self):
rc, version, err = self.module.run_command([self.svn_path, '--version', '--quiet'], check_rc=True)
return LooseVersion(version) >= LooseVersion('1.10.0')
def _exec(self, args, check_rc=True):
"""Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output."""
bits = [
self.svn_path,
'--non-interactive',
'--no-auth-cache',
]
if not self.validate_certs:
bits.append('--trust-server-cert')
stdin_data = None
if self.username:
bits.extend(["--username", self.username])
if self.password:
if self.has_option_password_from_stdin():
bits.append("--password-from-stdin")
stdin_data = self.password
else:
self.module.warn("The authentication provided will be used on the svn command line and is not secure. "
"To securely pass credentials, upgrade svn to version 1.10.0 or greater.")
bits.extend(["--password", self.password])
bits.extend(args)
rc, out, err = self.module.run_command(bits, check_rc, data=stdin_data)
if check_rc:
return out.splitlines()
else:
return rc
def is_svn_repo(self):
"""Checks if path is a SVN Repo."""
rc = self._exec(["info", self.dest], check_rc=False)
return rc == 0
def checkout(self, force=False):
"""Creates new svn working directory if it does not already exist."""
cmd = ["checkout"]
if force:
cmd.append("--force")
cmd.extend(["-r", self.revision, self.repo, self.dest])
self._exec(cmd)
def export(self, force=False):
"""Export svn repo to directory"""
cmd = ["export"]
if force:
cmd.append("--force")
cmd.extend(["-r", self.revision, self.repo, self.dest])
self._exec(cmd)
def switch(self):
"""Change working directory's repo."""
# switch to ensure we are pointing at correct repo.
# it also updates!
output = self._exec(["switch", "--revision", self.revision, self.repo, self.dest])
for line in output:
if re.search(r'^[ABDUCGE]\s', line):
return True
return False
def update(self):
"""Update existing svn working directory."""
output = self._exec(["update", "-r", self.revision, self.dest])
for line in output:
if re.search(r'^[ABDUCGE]\s', line):
return True
return False
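    # Illustrative: an update emitting a line such as 'U    trunk/README'
    # (svn change letters A/B/D/U/C/G/E in the first column) is reported as a
    # change by the regex above; a no-op update produces no such lines.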
def revert(self):
"""Revert svn working directory."""
output = self._exec(["revert", "-R", self.dest])
for line in output:
if re.search(r'^Reverted ', line) is None:
return True
return False
def get_revision(self):
"""Revision and URL of subversion working directory."""
text = '\n'.join(self._exec(["info", self.dest]))
rev = re.search(self.REVISION_RE, text, re.MULTILINE)
if rev:
rev = rev.group(0)
else:
rev = 'Unable to get revision'
url = re.search(r'^URL\s?:.*$', text, re.MULTILINE)
if url:
url = url.group(0)
else:
url = 'Unable to get URL'
return rev, url
def get_remote_revision(self):
"""Revision and URL of subversion working directory."""
text = '\n'.join(self._exec(["info", self.repo]))
rev = re.search(self.REVISION_RE, text, re.MULTILINE)
if rev:
rev = rev.group(0)
else:
rev = 'Unable to get remote revision'
return rev
def has_local_mods(self):
"""True if revisioned files have been added or modified. Unrevisioned files are ignored."""
lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
# The --quiet option will return only modified files.
# Match only revisioned files, i.e. ignore status '?'.
regex = re.compile(r'^[^?X]')
# Has local mods if more than 0 modified revisioned files.
return len(list(filter(regex.match, lines))) > 0
def needs_update(self):
curr, url = self.get_revision()
out2 = '\n'.join(self._exec(["info", "-r", self.revision, self.dest]))
head = re.search(self.REVISION_RE, out2, re.MULTILINE)
if head:
head = head.group(0)
else:
head = 'Unable to get revision'
        rev1 = int(curr.split(':')[1].strip())
        rev2 = int(head.split(':')[1].strip())
        change = rev1 < rev2
        return change, curr, head
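    # Illustrative: a working copy at 'Revision: 1889130' with 'Revision: 1889134'
    # available for self.revision returns (True, 'Revision: 1889130', 'Revision: 1889134').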
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(type='path'),
repo=dict(type='str', required=True, aliases=['name', 'repository']),
revision=dict(type='str', default='HEAD', aliases=['rev', 'version']),
force=dict(type='bool', default=False),
username=dict(type='str'),
password=dict(type='str', no_log=True),
executable=dict(type='path'),
export=dict(type='bool', default=False),
checkout=dict(type='bool', default=True),
update=dict(type='bool', default=True),
switch=dict(type='bool', default=True),
in_place=dict(type='bool', default=False),
validate_certs=dict(type='bool', default=False),
),
supports_check_mode=True,
)
dest = module.params['dest']
repo = module.params['repo']
revision = module.params['revision']
force = module.params['force']
username = module.params['username']
password = module.params['password']
svn_path = module.params['executable'] or module.get_bin_path('svn', True)
export = module.params['export']
switch = module.params['switch']
checkout = module.params['checkout']
update = module.params['update']
in_place = module.params['in_place']
validate_certs = module.params['validate_certs']
# We screenscrape a huge amount of svn commands so use C locale anytime we
# call run_command()
locale = get_best_parsable_locale(module)
module.run_command_environ_update = dict(LANG=locale, LC_MESSAGES=locale)
if not dest and (checkout or update or export):
module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no")
svn = Subversion(module, dest, repo, revision, username, password, svn_path, validate_certs)
if not export and not update and not checkout:
module.exit_json(changed=False, after=svn.get_remote_revision())
if export or not os.path.exists(dest):
before = None
local_mods = False
if module.check_mode:
module.exit_json(changed=True)
elif not export and not checkout:
module.exit_json(changed=False)
if not export and checkout:
svn.checkout()
files_changed = True
else:
svn.export(force=force)
files_changed = True
elif svn.is_svn_repo():
# Order matters. Need to get local mods before switch to avoid false
# positives. Need to switch before revert to ensure we are reverting to
# correct repo.
if not update:
module.exit_json(changed=False)
if module.check_mode:
if svn.has_local_mods() and not force:
module.fail_json(msg="ERROR: modified files exist in the repository.")
check, before, after = svn.needs_update()
module.exit_json(changed=check, before=before, after=after)
files_changed = False
before = svn.get_revision()
local_mods = svn.has_local_mods()
if switch:
files_changed = svn.switch() or files_changed
if local_mods:
if force:
files_changed = svn.revert() or files_changed
else:
module.fail_json(msg="ERROR: modified files exist in the repository.")
files_changed = svn.update() or files_changed
elif in_place:
before = None
svn.checkout(force=True)
files_changed = True
local_mods = svn.has_local_mods()
if local_mods and force:
svn.revert()
else:
module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest,))
if export:
module.exit_json(changed=True)
else:
after = svn.get_revision()
changed = files_changed or local_mods
module.exit_json(changed=changed, before=before, after=after)
if __name__ == '__main__':
main()
| 13,485 | Python | .py | 350 | 30.842857 | 132 | 0.625 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,161 | include_tasks.py | ansible_ansible/lib/ansible/modules/include_tasks.py |
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
author: Ansible Core Team (@ansible)
module: include_tasks
short_description: Dynamically include a task list
description:
- Includes a file with a list of tasks to be executed in the current playbook.
version_added: '2.4'
options:
file:
description:
- Specifies the name of the file that lists tasks to add to the current playbook.
type: str
version_added: '2.7'
apply:
description:
- Accepts a hash of task keywords (for example C(tags), C(become)) that will be applied to the tasks within the include.
type: str
version_added: '2.7'
free-form:
description:
- |
Specifies the name of the imported file directly without any other option C(- include_tasks: file.yml).
- Is the equivalent of specifying an argument for the O(file) parameter.
      - Most keywords, including loop, with_items, and conditionals, apply to this statement, unlike M(ansible.builtin.import_tasks).
- The do-until loop is not supported.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
- action_core.include
attributes:
check_mode:
support: none
diff_mode:
support: none
seealso:
- module: ansible.builtin.import_playbook
- module: ansible.builtin.import_role
- module: ansible.builtin.import_tasks
- module: ansible.builtin.include_role
- ref: playbooks_reuse
description: More information related to including and importing playbooks, roles and tasks.
"""
EXAMPLES = r"""
- hosts: all
tasks:
- ansible.builtin.debug:
msg: task1
- name: Include task list in play
ansible.builtin.include_tasks:
file: stuff.yaml
- ansible.builtin.debug:
msg: task10
- hosts: all
tasks:
- ansible.builtin.debug:
msg: task1
- name: Include task list in play only if the condition is true
ansible.builtin.include_tasks: "{{ hostvar }}.yaml"
when: hostvar is defined
- name: Apply tags to tasks within included file
ansible.builtin.include_tasks:
file: install.yml
apply:
tags:
- install
tags:
- always
- name: Apply tags to tasks within included file when using free-form
ansible.builtin.include_tasks: install.yml
args:
apply:
tags:
- install
tags:
- always
"""
RETURN = r"""
# This module does not return anything except tasks to execute.
"""
| 2,659 | Python | .py | 86 | 26.77907 | 132 | 0.708708 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,162 | lineinfile.py | ansible_ansible/lib/ansible/modules/lineinfile.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: lineinfile
short_description: Manage lines in text files
description:
- This module ensures a particular line is in a file, or replace an
existing line using a back-referenced regular expression.
- This is primarily useful when you want to change a single line in a file only.
- See the M(ansible.builtin.replace) module if you want to change multiple, similar lines
or check M(ansible.builtin.blockinfile) if you want to insert/update/remove a block of lines in a file.
For other cases, see the M(ansible.builtin.copy) or M(ansible.builtin.template) modules.
version_added: "0.7"
options:
path:
description:
- The file to modify.
- Before Ansible 2.3 this option was only usable as O(dest), O(destfile) and O(name).
type: path
required: true
aliases: [ dest, destfile, name ]
regexp:
description:
- The regular expression to look for in every line of the file.
- For O(state=present), the pattern to replace if found. Only the last line found will be replaced.
- For O(state=absent), the pattern of the line(s) to remove.
- If the regular expression is not matched, the line will be
added to the file in keeping with O(insertbefore) or O(insertafter)
settings.
- When modifying a line the regexp should typically match both the initial state of
the line as well as its state after replacement by O(line) to ensure idempotence.
- Uses Python regular expressions. See U(https://docs.python.org/3/library/re.html).
type: str
aliases: [ regex ]
version_added: '1.7'
search_string:
description:
- The literal string to look for in every line of the file. This does not have to match the entire line.
- For O(state=present), the line to replace if the string is found in the file. Only the last line found will be replaced.
- For O(state=absent), the line(s) to remove if the string is in the line.
- If the literal expression is not matched, the line will be
added to the file in keeping with O(insertbefore) or O(insertafter)
settings.
- Mutually exclusive with O(backrefs) and O(regexp).
type: str
version_added: '2.11'
state:
description:
- Whether the line should be there or not.
type: str
choices: [ absent, present ]
default: present
line:
description:
- The line to insert/replace into the file.
- Required for O(state=present).
- If O(backrefs) is set, may contain backreferences that will get
expanded with the O(regexp) capture groups if the regexp matches.
type: str
aliases: [ value ]
backrefs:
description:
- Used with O(state=present).
- If set, O(line) can contain backreferences (both positional and named)
that will get populated if the O(regexp) matches.
- This parameter changes the operation of the module slightly;
O(insertbefore) and O(insertafter) will be ignored, and if the O(regexp)
does not match anywhere in the file, the file will be left unchanged.
- If the O(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
- Mutually exclusive with O(search_string).
type: bool
default: no
version_added: "1.1"
insertafter:
description:
- Used with O(state=present).
- If specified, the line will be inserted after the last match of specified regular expression.
      - If the first match is required, use O(firstmatch=yes).
- A special value is available; V(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches or no value is passed, V(EOF) will be used instead.
- If O(insertbefore) is set, default value V(EOF) will be ignored.
- If regular expressions are passed to both O(regexp) and O(insertafter), O(insertafter) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertbefore).
type: str
insertbefore:
description:
- Used with O(state=present).
- If specified, the line will be inserted before the last match of specified regular expression.
- If the first match is required, use O(firstmatch=yes).
      - A special value is available; V(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file.
- If regular expressions are passed to both O(regexp) and O(insertbefore), O(insertbefore) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertafter).
type: str
version_added: "1.1"
create:
description:
- Used with O(state=present).
- If specified, the file will be created if it does not already exist.
- By default it will fail if the file is missing.
type: bool
default: no
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
firstmatch:
description:
- Used with O(insertafter) or O(insertbefore).
- If set, O(insertafter) and O(insertbefore) will work with the first line that matches the given regular expression.
type: bool
default: no
version_added: "2.5"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.files
- files
- validate
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix
safe_file_operations:
support: full
vault:
support: none
notes:
- As of Ansible 2.3, the O(dest) option has been changed to O(path) as default, but O(dest) still works as well.
seealso:
- module: ansible.builtin.blockinfile
- module: ansible.builtin.copy
- module: ansible.builtin.file
- module: ansible.builtin.replace
- module: ansible.builtin.template
- module: community.windows.win_lineinfile
author:
  - Daniel Hokka Zakrisson (@dhozac)
- Ahti Kitsik (@ahtik)
- Jose Angel Munoz (@imjoseangel)
"""
EXAMPLES = r"""
# NOTE: Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- name: Ensure SELinux is set to enforcing mode
ansible.builtin.lineinfile:
path: /etc/selinux/config
regexp: '^SELINUX='
line: SELINUX=enforcing
- name: Make sure group wheel is not in the sudoers configuration
ansible.builtin.lineinfile:
path: /etc/sudoers
state: absent
regexp: '^%wheel'
- name: Replace a localhost entry with our own
ansible.builtin.lineinfile:
path: /etc/hosts
regexp: '^127\.0\.0\.1'
line: 127.0.0.1 localhost
owner: root
group: root
mode: '0644'
- name: Replace a localhost entry searching for a literal string to avoid escaping
ansible.builtin.lineinfile:
path: /etc/hosts
search_string: '127.0.0.1'
line: 127.0.0.1 localhost
owner: root
group: root
mode: '0644'
- name: Ensure the default Apache port is 8080
ansible.builtin.lineinfile:
path: /etc/httpd/conf/httpd.conf
regexp: '^Listen '
insertafter: '^#Listen '
line: Listen 8080
- name: Ensure php extension matches new pattern
ansible.builtin.lineinfile:
path: /etc/httpd/conf/httpd.conf
search_string: '<FilesMatch ".php[45]?$">'
insertafter: '^\t<Location \/>\n'
line: ' <FilesMatch ".php[34]?$">'
- name: Ensure we have our own comment added to /etc/services
ansible.builtin.lineinfile:
path: /etc/services
regexp: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
- name: Add a line to a file if the file does not exist, without passing regexp
ansible.builtin.lineinfile:
path: /tmp/testfile
line: 192.168.1.99 foo.lab.net foo
create: yes
# NOTE: Yaml requires escaping backslashes in double quotes but not in single quotes
- name: Ensure the JBoss memory settings are exactly as needed
ansible.builtin.lineinfile:
path: /opt/jboss-as/bin/standalone.conf
regexp: '^(.*)Xms(\d+)m(.*)$'
line: '\1Xms${xms}m\3'
backrefs: yes
# NOTE: Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
- name: Validate the sudoers file before saving
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
regexp: '^%ADMIN ALL='
line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
validate: /usr/sbin/visudo -cf %s
# See https://docs.python.org/3/library/re.html for further details on syntax
- name: Use backrefs with alternative group syntax to avoid conflicts with variable values
ansible.builtin.lineinfile:
path: /tmp/config
regexp: ^(host=).*
line: \g<1>{{ hostname }}
backrefs: yes
"""
RETURN = r"""#"""
import os
import re
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
def write_changes(module, b_lines, dest):
tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
with os.fdopen(tmpfd, 'wb') as f:
f.writelines(b_lines)
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile,
to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
unsafe_writes=module.params['unsafe_writes'])
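# Illustrative flow: with validate set to '/usr/sbin/visudo -cf %s', the
# validator runs against the temporary file first, and the atomic move onto
# the real destination happens only when it exits 0.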
def check_file_attrs(module, changed, message, diff):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False, diff=diff):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def present(module, dest, regexp, search_string, line, insertafter, insertbefore, create,
backup, backrefs, firstmatch):
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
b_destpath = os.path.dirname(b_dest)
if b_destpath and not os.path.exists(b_destpath) and not module.check_mode:
try:
os.makedirs(b_destpath)
except Exception as e:
module.fail_json(msg='Error creating %s (%s)' % (to_text(b_destpath), to_text(e)))
b_lines = []
else:
with open(b_dest, 'rb') as f:
b_lines = f.readlines()
if module._diff:
diff['before'] = to_native(b''.join(b_lines))
if regexp is not None:
bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
if insertafter not in (None, 'BOF', 'EOF'):
bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
elif insertbefore not in (None, 'BOF'):
bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
else:
bre_ins = None
# index[0] is the line num where regexp has been found
# index[1] is the line num where insertafter/insertbefore has been found
index = [-1, -1]
match = None
exact_line_match = False
b_line = to_bytes(line, errors='surrogate_or_strict')
# The module's doc says
# "If regular expressions are passed to both regexp and
# insertafter, insertafter is only honored if no match for regexp is found."
# Therefore:
    # 1. regexp or search_string was found -> ignore insertafter, replace the found line
# 2. regexp or search_string was not found -> insert the line after 'insertafter' or 'insertbefore' line
# Given the above:
    # 1. First, look for a match of regexp:
if regexp is not None:
for lineno, b_cur_line in enumerate(b_lines):
match_found = bre_m.search(b_cur_line)
if match_found:
index[0] = lineno
match = match_found
if firstmatch:
break
    # 2. Next, look for a match of search_string:
if search_string is not None:
for lineno, b_cur_line in enumerate(b_lines):
match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
if match_found:
index[0] = lineno
match = match_found
if firstmatch:
break
    # 3. When no match was found in the previous steps,
    #    search for the insertafter/insertbefore position:
if not match:
for lineno, b_cur_line in enumerate(b_lines):
if b_line == b_cur_line.rstrip(b'\r\n'):
index[0] = lineno
exact_line_match = True
elif bre_ins is not None and bre_ins.search(b_cur_line):
if insertafter:
# + 1 for the next line
index[1] = lineno + 1
if firstmatch:
break
if insertbefore:
# index[1] for the previous line
index[1] = lineno
if firstmatch:
break
msg = ''
changed = False
b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
# Exact line or Regexp matched a line in the file
if index[0] != -1:
if backrefs and match:
b_new_line = match.expand(b_line)
else:
# Don't do backref expansion if not asked.
b_new_line = b_line
if not b_new_line.endswith(b_linesep):
b_new_line += b_linesep
# If no regexp or search_string was given and no line match is found anywhere in the file,
# insert the line appropriately if using insertbefore or insertafter
if (regexp, search_string, match) == (None, None, None) and not exact_line_match:
# Insert lines
if insertafter and insertafter != 'EOF':
# Ensure there is a line separator after the found string
# at the end of the file.
            if b_lines and b_lines[-1][-1:] not in (b'\n', b'\r'):
b_lines[-1] = b_lines[-1] + b_linesep
# If the line to insert after is at the end of the file
# use the appropriate index value.
if len(b_lines) == index[1]:
if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[1]].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif insertbefore and insertbefore != 'BOF':
# If the line to insert before is at the beginning of the file
# use the appropriate index value.
if index[1] <= 0:
if b_lines[index[1]].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
elif b_lines[index[0]] != b_new_line:
b_lines[index[0]] = b_new_line
msg = 'line replaced'
changed = True
elif backrefs:
# Do absolutely nothing, since it's not safe generating the line
# without the regexp matching to populate the backrefs.
pass
# Add it to the beginning of the file
elif insertbefore == 'BOF' or insertafter == 'BOF':
b_lines.insert(0, b_line + b_linesep)
msg = 'line added'
changed = True
# Add it to the end of the file if requested or
# if insertafter/insertbefore didn't match anything
# (so default behaviour is to add at the end)
elif insertafter == 'EOF' or index[1] == -1:
# If the file is not empty then ensure there's a newline before the added line
        if b_lines and b_lines[-1][-1:] not in (b'\n', b'\r'):
b_lines.append(b_linesep)
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif insertafter and index[1] != -1:
# Don't insert the line if it already matches at the index.
# If the line to insert after is at the end of the file use the appropriate index value.
if len(b_lines) == index[1]:
if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
b_lines.append(b_line + b_linesep)
msg = 'line added'
changed = True
elif b_line != b_lines[index[1]].rstrip(b'\n\r'):
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
# insert matched, but not the regexp or search_string
else:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
changed = True
if module._diff:
diff['after'] = to_native(b''.join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup and os.path.exists(b_dest):
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if module.check_mode and not os.path.exists(b_dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
def absent(module, dest, regexp, search_string, line, backup):
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
module.exit_json(changed=False, msg="file not present")
msg = ''
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
with open(b_dest, 'rb') as f:
b_lines = f.readlines()
if module._diff:
diff['before'] = to_native(b''.join(b_lines))
if regexp is not None:
bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
found = []
b_line = to_bytes(line, errors='surrogate_or_strict')
def matcher(b_cur_line):
if regexp is not None:
match_found = bre_c.search(b_cur_line)
elif search_string is not None:
match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
else:
match_found = b_line == b_cur_line.rstrip(b'\r\n')
if match_found:
found.append(b_cur_line)
return not match_found
    b_lines = [line for line in b_lines if matcher(line)]
changed = len(found) > 0
if module._diff:
diff['after'] = to_native(b''.join(b_lines))
backupdest = ""
if changed and not module.check_mode:
if backup:
backupdest = module.backup_local(dest)
write_changes(module, b_lines, dest)
if changed:
msg = "%s line(s) removed" % len(found)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % dest
attr_diff['after_header'] = '%s (file attributes)' % dest
difflist = [diff, attr_diff]
module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
state=dict(type='str', default='present', choices=['absent', 'present']),
regexp=dict(type='str', aliases=['regex']),
search_string=dict(type='str'),
line=dict(type='str', aliases=['value']),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
backrefs=dict(type='bool', default=False),
create=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
firstmatch=dict(type='bool', default=False),
validate=dict(type='str'),
),
mutually_exclusive=[
['insertbefore', 'insertafter'], ['regexp', 'search_string'], ['backrefs', 'search_string']],
add_file_common_args=True,
supports_check_mode=True,
)
params = module.params
create = params['create']
backup = params['backup']
backrefs = params['backrefs']
path = params['path']
firstmatch = params['firstmatch']
regexp = params['regexp']
search_string = params['search_string']
line = params['line']
if '' in [regexp, search_string]:
msg = ("The %s is an empty string, which will match every line in the file. "
"This may have unintended consequences, such as replacing the last line in the file rather than appending.")
param_name = 'search string'
if regexp == '':
param_name = 'regular expression'
msg += " If this is desired, use '^' to match every line in the file and avoid this warning."
module.warn(msg % param_name)
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.isdir(b_path):
module.fail_json(rc=256, msg='Path %s is a directory !' % path)
if params['state'] == 'present':
if backrefs and regexp is None:
module.fail_json(msg='regexp is required with backrefs=true')
if line is None:
module.fail_json(msg='line is required with state=present')
# Deal with the insertafter default value manually, to avoid errors
# because of the mutually_exclusive mechanism.
ins_bef, ins_aft = params['insertbefore'], params['insertafter']
if ins_bef is None and ins_aft is None:
ins_aft = 'EOF'
present(module, path, regexp, search_string, line,
ins_aft, ins_bef, create, backup, backrefs, firstmatch)
else:
if (regexp, search_string, line) == (None, None, None):
module.fail_json(msg='one of line, search_string, or regexp is required with state=absent')
absent(module, path, regexp, search_string, line, backup)
if __name__ == '__main__':
main()
| 23,734 | Python | .py | 539 | 36.022263 | 148 | 0.6308 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,163 | get_url.py | ansible_ansible/lib/ansible/modules/get_url.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: get_url
short_description: Downloads files from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
server I(must) have direct access to the remote resource.
- By default, if an environment variable E(<protocol>_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see R(setting the environment,playbooks_environment)),
or by using the use_proxy option.
- HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
your proxy environment for both protocols is correct.
- From Ansible 2.4 when run with C(--check), it will do a HEAD request to validate the URL but
will not download the entire file or verify it against hashes and will report incorrect changed status.
- For Windows targets, use the M(ansible.windows.win_get_url) module instead.
version_added: '0.6'
options:
ciphers:
description:
- SSL/TLS Ciphers to use for the request.
- 'When a list is provided, all ciphers are joined in order with C(:).'
- See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
for more details.
      - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions.
type: list
elements: str
version_added: '2.14'
decompress:
description:
- Whether to attempt to decompress gzip content-encoded responses.
type: bool
default: true
version_added: '2.14'
url:
description:
- HTTP, HTTPS, or FTP URL in the form C((http|https|ftp)://[user[:pass]]@host.domain[:port]/path).
type: str
required: true
dest:
description:
- Absolute path of where to download the file to.
- If O(dest) is a directory, either the server provided filename or, if
none provided, the base name of the URL on the remote server will be
used. If a directory, O(force) has no effect.
- If O(dest) is a directory, the file will always be downloaded
(regardless of the O(force) and O(checksum) option), but
replaced only if the contents changed.
type: path
required: true
tmp_dest:
description:
- Absolute path of where temporary file is downloaded to.
- When run on Ansible 2.5 or greater, path defaults to ansible's C(remote_tmp) setting.
- When run on Ansible prior to 2.5, it defaults to E(TMPDIR), E(TEMP) or E(TMP) env variables or a platform specific value.
- U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir).
type: path
version_added: '2.1'
force:
description:
- If V(true) and O(dest) is not a directory, will download the file every
time and replace the file if the contents change. If V(false), the file
will only be downloaded if the destination does not exist. Generally
should be V(true) only for small local files.
- Prior to 0.6, this module behaved as if V(true) was the default.
type: bool
default: no
version_added: '0.7'
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
version_added: '2.1'
checksum:
description:
- 'If a checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
        Format: <algorithm>:<checksum|url>, for example C(checksum="sha256:D98291AC[...]B6DC7B97") or
        C(checksum="sha256:http://example.com/path/sha256sum.txt").'
- If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions.
- The Python C(hashlib) module is responsible for providing the available algorithms.
The choices vary based on Python version and OpenSSL version.
- On systems running in FIPS compliant mode, the C(md5) algorithm may be unavailable.
      - Additionally, if a checksum is passed to this parameter, and the file exists under
the O(dest) location, the C(destination_checksum) would be calculated, and if
checksum equals C(destination_checksum), the file download would be skipped
(unless O(force=true)). If the checksum does not equal C(destination_checksum),
the destination file is deleted.
- If the checksum URL requires username and password, O(url_username) and O(url_password) are used
to download the checksum file.
type: str
default: ''
version_added: "2.0"
use_proxy:
description:
      - If V(false), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
type: bool
default: yes
validate_certs:
description:
- If V(false), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: yes
timeout:
description:
- Timeout in seconds for URL request.
type: int
default: 10
version_added: '1.8'
headers:
description:
- Add custom HTTP headers to a request in hash/dict format.
- The hash/dict format was added in Ansible 2.6.
- Previous versions used a C("key:value,key:value") string format.
- The C("key:value,key:value") string format is deprecated and has been removed in version 2.10.
type: dict
version_added: '2.0'
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without O(url_password) for sites that allow empty passwords.
- Since version 2.8 you can also use the O(username) alias for this option.
type: str
aliases: ['username']
version_added: '1.6'
url_password:
description:
- The password for use in HTTP basic authentication.
- If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used.
- Since version 2.8 you can also use the O(password) alias for this option.
type: str
aliases: ['password']
version_added: '1.6'
force_basic_auth:
description:
- Force the sending of the Basic authentication header upon initial request.
- C(httplib2), the library used by the uri module, only sends authentication information when a web service
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail.
type: bool
default: no
version_added: '2.0'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, O(client_key) is not required.
type: path
version_added: '2.4'
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If O(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '2.4'
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
unredirected_headers:
description:
- A list of header names that will not be sent on subsequent redirected requests. This list is case
insensitive. By default all headers will be sent on redirected requests. In some cases it may be beneficial to list
headers such as C(Authorization) here to avoid potential credential exposure.
default: []
type: list
elements: str
version_added: '2.12'
use_gssapi:
description:
- Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with O(url_username)/O(url_password) or with the GSSAPI env var
E(KRB5CCNAME) that specifies a custom Kerberos credential cache.
- NTLM authentication is I(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
use_netrc:
description:
- Determines whether to use credentials from the C(~/.netrc) file.
- By default C(.netrc) is used with Basic authentication headers.
- When V(false), C(.netrc) credentials are ignored.
type: bool
default: true
version_added: '2.14'
# informational: requirements for nodes
extends_documentation_fragment:
- files
- action_common_attributes
attributes:
check_mode:
details: the changed status will reflect comparison to an empty source file
support: partial
diff_mode:
support: none
platform:
platforms: posix
notes:
- For Windows targets, use the M(ansible.windows.win_get_url) module instead.
seealso:
- module: ansible.builtin.uri
- module: ansible.windows.win_get_url
author:
- Jan-Piet Mens (@jpmens)
"""
EXAMPLES = r"""
- name: Download foo.conf
ansible.builtin.get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
mode: '0440'
- name: Download file and force basic auth
ansible.builtin.get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
force_basic_auth: yes
- name: Download file with custom HTTP headers
ansible.builtin.get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
headers:
key1: one
key2: two
- name: Download file with check (sha256)
ansible.builtin.get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
- name: Download file with check (md5)
ansible.builtin.get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
checksum: md5:66dffb5228a211e61d6d7ef4a86f5758
- name: Download file with checksum url (sha256)
ansible.builtin.get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
checksum: sha256:http://example.com/path/sha256sum.txt
- name: Download file from a file path
ansible.builtin.get_url:
url: file:///tmp/a_file.txt
dest: /tmp/afilecopy.txt
- name: >-
    Fetch file that requires authentication.
    username/password only available since 2.8, in older versions you need to use url_username/url_password
ansible.builtin.get_url:
url: http://example.com/path/file.conf
dest: /etc/foo.conf
username: bar
password: '{{ mysecret }}'
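# A hedged illustration (not part of the original examples): the documented
# unredirected_headers option can keep an Authorization header from being
# forwarded to redirect targets. The URL and the mytoken variable are placeholders.
- name: Download file without forwarding Authorization on redirects
  ansible.builtin.get_url:
    url: https://example.com/path/file.conf
    dest: /etc/foo.conf
    headers:
      Authorization: "Bearer {{ mytoken }}"
    unredirected_headers:
      - Authorization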
"""
RETURN = r"""
backup_file:
description: name of backup file created after download
returned: changed and if backup=yes
type: str
sample: /path/to/file.txt.2015-02-12@22:09~
checksum_dest:
description: sha1 checksum of the file after copy
returned: success
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
checksum_src:
description: sha1 checksum of the file
returned: success
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
dest:
description: destination file/path
returned: success
type: str
sample: /path/to/file.txt
elapsed:
description: The number of seconds that elapsed while performing the download
returned: always
type: int
sample: 23
gid:
description: group id of the file
returned: success
type: int
sample: 100
group:
description: group of the file
returned: success
type: str
sample: "httpd"
md5sum:
description: md5 checksum of the file after download
returned: when supported
type: str
sample: "2a5aeecc61dc98c4d780b14b330e3282"
mode:
description: permissions of the target
returned: success
type: str
sample: "0644"
msg:
description: the HTTP message from the request
returned: always
type: str
sample: OK (unknown bytes)
owner:
description: owner of the file
returned: success
type: str
sample: httpd
secontext:
description: the SELinux security context of the file
returned: success
type: str
sample: unconfined_u:object_r:user_tmp_t:s0
size:
description: size of the target
returned: success
type: int
sample: 1220
src:
description: source file used after download
returned: always
type: str
sample: /tmp/tmpAdFLdV
state:
description: state of the target
returned: success
type: str
sample: file
status_code:
description: the HTTP status code from the request
returned: always
type: int
sample: 200
uid:
description: owner id of the file, after execution
returned: success
type: int
sample: 100
url:
description: the actual URL used for the request
returned: always
type: str
sample: https://www.ansible.com/
"""
import email.message
import os
import re
import shutil
import tempfile
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.module_utils.compat.datetime import utcnow, utcfromtimestamp
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.urls import fetch_url, url_argument_spec
# ==============================================================
# url handling
def url_filename(url):
fn = os.path.basename(urlsplit(url)[2])
if fn == '':
return 'index.html'
return fn
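# Illustrative examples (assumed inputs): url_filename('http://example.com/path/file.conf')
# returns 'file.conf', while url_filename('http://example.com/') has an empty basename
# and falls back to 'index.html'.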
def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest='', method='GET', unredirected_headers=None,
decompress=True, ciphers=None, use_netrc=True):
"""
Download data from the url and store in a temporary file.
Return (tempfile, info about the request)
"""
start = utcnow()
rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method,
unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
elapsed = (utcnow() - start).seconds
if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''), status_code=info['status'], elapsed=elapsed)
# Exceptions in fetch_url may result in a status -1, this ensures a proper error to the user in all cases
if info['status'] == -1:
module.fail_json(msg=info['msg'], url=url, dest=dest, elapsed=elapsed)
if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')):
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest, elapsed=elapsed)
# create a temporary file and copy content to do checksum-based replacement
if tmp_dest:
# tmp_dest should be an existing dir
tmp_dest_is_dir = os.path.isdir(tmp_dest)
if not tmp_dest_is_dir:
if os.path.exists(tmp_dest):
module.fail_json(msg="%s is a file but should be a directory." % tmp_dest, elapsed=elapsed)
else:
module.fail_json(msg="%s directory does not exist." % tmp_dest, elapsed=elapsed)
else:
tmp_dest = module.tmpdir
fd, tempname = tempfile.mkstemp(dir=tmp_dest)
f = os.fdopen(fd, 'wb')
try:
shutil.copyfileobj(rsp, f)
except Exception as e:
os.remove(tempname)
module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), elapsed=elapsed, exception=traceback.format_exc())
f.close()
rsp.close()
return tempname, info
def extract_filename_from_headers(headers):
"""Extracts a filename from the given dict of HTTP headers.
Returns the filename if successful, else None.
"""
msg = email.message.Message()
msg['content-disposition'] = headers.get('content-disposition', '')
if filename := msg.get_param('filename', header='content-disposition'):
# Avoid directory traversal
filename = os.path.basename(filename)
return filename
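# Illustrative example (assumed header value): given
# {'content-disposition': 'attachment; filename="foo.conf"'} the parsing above
# returns 'foo.conf'; a missing or filename-less header yields None.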
def is_url(checksum):
"""
Returns True if checksum value has supported URL scheme, else False."""
supported_schemes = ('http', 'https', 'ftp', 'file')
return urlsplit(checksum).scheme in supported_schemes
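# Illustrative examples: is_url('http://example.com/path/sha256sum.txt') is True,
# while a bare digest such as 'b5bb9d80[...]4944c' (no scheme) is False.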
# ==============================================================
# main
def main():
argument_spec = url_argument_spec()
# setup aliases
argument_spec['url_username']['aliases'] = ['username']
argument_spec['url_password']['aliases'] = ['password']
argument_spec.update(
url=dict(type='str', required=True),
dest=dict(type='path', required=True),
backup=dict(type='bool', default=False),
checksum=dict(type='str', default=''),
timeout=dict(type='int', default=10),
headers=dict(type='dict'),
tmp_dest=dict(type='path'),
unredirected_headers=dict(type='list', elements='str', default=[]),
decompress=dict(type='bool', default=True),
ciphers=dict(type='list', elements='str'),
use_netrc=dict(type='bool', default=True),
)
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
url = module.params['url']
dest = module.params['dest']
backup = module.params['backup']
force = module.params['force']
checksum = module.params['checksum']
use_proxy = module.params['use_proxy']
timeout = module.params['timeout']
headers = module.params['headers']
tmp_dest = module.params['tmp_dest']
unredirected_headers = module.params['unredirected_headers']
decompress = module.params['decompress']
ciphers = module.params['ciphers']
use_netrc = module.params['use_netrc']
result = dict(
changed=False,
checksum_dest=None,
checksum_src=None,
dest=dest,
elapsed=0,
url=url,
)
dest_is_dir = os.path.isdir(dest)
last_mod_time = None
# checksum specified, parse for algorithm and checksum
if checksum:
try:
algorithm, checksum = checksum.split(':', 1)
except ValueError:
module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
if is_url(checksum):
checksum_url = checksum
# download checksum file to checksum_tmpsrc
checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest,
unredirected_headers=unredirected_headers, ciphers=ciphers, use_netrc=use_netrc)
with open(checksum_tmpsrc) as f:
lines = [line.rstrip('\n') for line in f]
os.remove(checksum_tmpsrc)
checksum_map = []
filename = url_filename(url)
if len(lines) == 1 and len(lines[0].split()) == 1:
# Only a single line with a single string
# treat it as a checksum only file
checksum_map.append((lines[0], filename))
else:
# The assumption here is the file is in the format of
# checksum filename
for line in lines:
# Split by one whitespace to keep the leading type char ' ' (whitespace) for text and '*' for binary
parts = line.split(" ", 1)
if len(parts) == 2:
# Remove the leading type char; we expect ' ' (text mode) or '*' (binary mode)
if parts[1].startswith((" ", "*",)):
parts[1] = parts[1][1:]
# Append checksum and path without potential leading './'
checksum_map.append((parts[0], parts[1].lstrip("./")))
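# Illustrative mapping (assumed sha256sum-style input): the line
# "b5bb9d80[...]4944c  foo.conf" becomes ("b5bb9d80[...]4944c", "foo.conf"),
# and "b5bb9d80[...]4944c *foo.conf" (binary marker) maps to the same pair.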
# Look through each line in the checksum file for a hash corresponding to
# the filename in the url, returning the first hash that is found.
for cksum in (s for (s, f) in checksum_map if f == filename):
checksum = cksum
break
else:
checksum = None
if checksum is None:
module.fail_json(msg="Unable to find a checksum for file '%s' in '%s'" % (filename, checksum_url))
# Remove any non-alphanumeric characters, including the infamous
# Unicode zero-width space
checksum = re.sub(r'\W+', '', checksum).lower()
# Ensure the checksum portion is a hexdigest
try:
int(checksum, 16)
except ValueError:
module.fail_json(msg='The checksum format is invalid', **result)
if not dest_is_dir and os.path.exists(dest):
checksum_mismatch = False
# If the download is not forced and there is a checksum, allow
# checksum match to skip the download.
if not force and checksum != '':
destination_checksum = module.digest_from_file(dest, algorithm)
if checksum != destination_checksum:
checksum_mismatch = True
# Not forcing redownload, unless checksum does not match
if not force and checksum and not checksum_mismatch:
# allow file attribute changes
# allow file attribute changes
file_args = module.load_file_common_arguments(module.params, path=dest)
result['changed'] = module.set_fs_attributes_if_different(file_args, False)
if result['changed']:
module.exit_json(msg="file already exists but file attributes changed", **result)
module.exit_json(msg="file already exists", **result)
# If the file already exists, prepare the last modified time for the
# request.
mtime = os.path.getmtime(dest)
last_mod_time = utcfromtimestamp(mtime)
# If the checksum does not match we have to force the download
# because last_mod_time may be newer than on remote
if checksum_mismatch:
force = True
# download to tmpsrc
start = utcnow()
method = 'HEAD' if module.check_mode else 'GET'
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method,
unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
result['elapsed'] = (utcnow() - start).seconds
result['src'] = tmpsrc
# Now the request has completed, we can finally generate the final
# destination file name from the info dict.
if dest_is_dir:
filename = extract_filename_from_headers(info)
if not filename:
# Fall back to extracting the filename from the URL.
# Pluck the URL from the info, since a redirect could have changed
# it.
filename = url_filename(info['url'])
dest = os.path.join(dest, filename)
result['dest'] = dest
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], **result)
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Source %s is not readable" % (tmpsrc), **result)
result['checksum_src'] = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (dest), **result)
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not readable" % (dest), **result)
result['checksum_dest'] = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(dest)):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s does not exist" % (os.path.dirname(dest)), **result)
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s is not writable" % (os.path.dirname(dest)), **result)
if module.check_mode:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
result['changed'] = ('checksum_dest' not in result or
result['checksum_src'] != result['checksum_dest'])
module.exit_json(msg=info.get('msg', ''), **result)
# If a checksum was provided, ensure that the temporary file matches this checksum
# before moving it to the destination.
if checksum != '':
tmpsrc_checksum = module.digest_from_file(tmpsrc, algorithm)
if checksum != tmpsrc_checksum:
os.remove(tmpsrc)
module.fail_json(msg=f"The checksum for {tmpsrc} did not match {checksum}; it was {tmpsrc_checksum}.", **result)
# Copy temporary file to destination if necessary
backup_file = None
if result['checksum_src'] != result['checksum_dest']:
try:
if backup:
if os.path.exists(dest):
backup_file = module.backup_local(dest)
module.atomic_move(tmpsrc, dest, unsafe_writes=module.params['unsafe_writes'])
except Exception as e:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
exception=traceback.format_exc(), **result)
result['changed'] = True
else:
result['changed'] = False
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
# allow file attribute changes
file_args = module.load_file_common_arguments(module.params, path=dest)
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
# Backwards compat only. We'll return None on FIPS enabled systems
try:
result['md5sum'] = module.md5(dest)
except ValueError:
result['md5sum'] = None
if backup_file:
result['backup_file'] = backup_file
# Mission complete
module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result)
if __name__ == '__main__':
main()
# ---- file: ansible_ansible/lib/ansible/modules/dpkg_selections.py ----
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: dpkg_selections
short_description: Manage dpkg package selections
description:
- Change dpkg package selection state via C(--get-selections) and C(--set-selections).
version_added: "2.0"
author:
- Brian Brazil (@brian-brazil) <brian.brazil@boxever.com>
options:
name:
description:
- Name of the package.
required: true
type: str
selection:
description:
- The selection state to set the package to.
choices: [ 'install', 'hold', 'deinstall', 'purge' ]
required: true
type: str
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
support: full
platforms: debian
notes:
- This module will not cause any packages to be installed/removed/purged, use the M(ansible.builtin.apt) module for that.
"""
EXAMPLES = """
- name: Prevent python from being upgraded
ansible.builtin.dpkg_selections:
name: python
selection: hold
- name: Allow python to be upgraded
ansible.builtin.dpkg_selections:
name: python
selection: install
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
selection=dict(choices=['install', 'hold', 'deinstall', 'purge'], required=True)
),
supports_check_mode=True,
)
dpkg = module.get_bin_path('dpkg', True)
locale = get_best_parsable_locale(module)
DPKG_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale)
module.run_command_environ_update = DPKG_ENV
name = module.params['name']
selection = module.params['selection']
# Get current settings.
rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
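# Illustrative note (assumed output): for an installed package, stdout looks
# like "python3<tab>install", so out.split()[1] below yields the current
# selection state ('install', 'hold', 'deinstall' or 'purge').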
if 'no packages found matching' in err:
module.fail_json(msg="Failed to find package '%s' to perform selection '%s'." % (name, selection))
elif not out:
current = 'not present'
else:
current = out.split()[1]
changed = current != selection
if module.check_mode or not changed:
module.exit_json(changed=changed, before=current, after=selection)
module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
module.exit_json(changed=changed, before=current, after=selection)
if __name__ == '__main__':
main()
# ---- file: ansible_ansible/lib/ansible/modules/copy.py ----
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: copy
version_added: historical
short_description: Copy files to remote locations
description:
- The M(ansible.builtin.copy) module copies a file or a directory structure from the local or remote machine to a location on the remote machine.
File system meta-information (permissions, ownership, etc.) may be set, even when the file or directory already exists on the target system.
Some meta-information may be copied on request.
- Get meta-information with the M(ansible.builtin.stat) module.
- Set meta-information with the M(ansible.builtin.file) module.
- Use the M(ansible.builtin.fetch) module to copy files from remote locations to the local box.
- If you need variable interpolation in copied files, use the M(ansible.builtin.template) module.
Using a variable with the O(content) parameter produces unpredictable results.
- For Windows targets, use the M(ansible.windows.win_copy) module instead.
options:
src:
description:
- Local path to a file to copy to the remote server.
- This can be absolute or relative.
- If path is a directory, it is copied recursively. In this case, if path ends
with C(/), only inside contents of that directory are copied to destination.
Otherwise, if it does not end with C(/), the directory itself with all contents
is copied. This behavior is similar to the C(rsync) command line tool.
type: path
content:
description:
- When used instead of O(src), sets the contents of a file directly to the specified value.
- Works only when O(dest) is a file. Creates the file if it does not exist.
- For advanced formatting or if O(content) contains a variable, use the
M(ansible.builtin.template) module.
type: str
version_added: '1.1'
dest:
description:
- Remote absolute path where the file should be copied to.
- If O(src) is a directory, this must be a directory too.
- If O(dest) is a non-existent path and if either O(dest) ends with C(/) or O(src) is a directory, O(dest) is created.
- If O(dest) is a relative path, the starting directory is determined by the remote host.
- If O(src) and O(dest) are files, the parent directory of O(dest) is not created and the task fails if it does not already exist.
type: path
required: yes
backup:
description:
- Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
version_added: '0.7'
force:
description:
- Influence whether the remote file must always be replaced.
- If V(true), the remote file will be replaced when contents are different from the source.
- If V(false), the file will only be transferred if the destination does not exist.
type: bool
default: yes
version_added: '1.1'
mode:
description:
- The permissions of the destination file or directory.
- For those used to C(/usr/bin/chmod) remember that modes are actually octal numbers.
You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
(like V(0644) or V(01777)) or quote it (like V('644') or V('1777')) so Ansible receives a string
and can do its own conversion from string into number. Giving Ansible a number without following
one of these rules will end up with a decimal number which will have unexpected results.
- As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, V(u+rwx) or V(u=rw,g=r,o=r)).
- As of Ansible 2.3, the mode may also be the special string V(preserve).
- V(preserve) means that the file will be given the same permissions as the source file.
- When doing a recursive copy, see also O(directory_mode).
- If O(mode) is not specified and the destination file B(does not) exist, the default C(umask) on the system will be used
when setting the mode for the newly created file.
- If O(mode) is not specified and the destination file B(does) exist, the mode of the existing file will be used.
- Specifying O(mode) is the best way to ensure files are created with the correct permissions.
See CVE-2020-1736 for further details.
directory_mode:
description:
- Set the access permissions of newly created directories to the given mode.
Permissions on existing directories do not change.
- See O(mode) for the syntax of accepted values.
- The target system's defaults determine permissions when this parameter is not set.
type: raw
version_added: '1.5'
remote_src:
description:
- Influence whether O(src) needs to be transferred or already is present remotely.
- If V(false), it will search for O(src) on the controller node.
- If V(true), it will search for O(src) on the managed (remote) node.
- O(remote_src) supports recursive copying as of version 2.8.
- O(remote_src) only works with O(mode=preserve) as of version 2.6.
- Auto-decryption of files does not work when O(remote_src=yes).
type: bool
default: no
version_added: '2.0'
follow:
description:
- This flag indicates that filesystem links in the destination, if they exist, should be followed.
type: bool
default: no
version_added: '1.8'
local_follow:
description:
- This flag indicates that filesystem links in the source tree, if they exist, should be followed.
type: bool
version_added: '2.4'
checksum:
description:
- SHA1 checksum of the file being transferred.
- Used to validate that the copy of the file was successful.
- If this is not provided, Ansible will use the locally calculated checksum of the src file.
type: str
version_added: '2.5'
extends_documentation_fragment:
- decrypt
- files
- validate
- action_common_attributes
- action_common_attributes.files
- action_common_attributes.flow
notes:
- The M(ansible.builtin.copy) module's recursive copy facility does not scale to lots (>hundreds) of files.
seealso:
- module: ansible.builtin.assemble
- module: ansible.builtin.fetch
- module: ansible.builtin.file
- module: ansible.builtin.template
- module: ansible.posix.synchronize
- module: ansible.windows.win_copy
author:
- Ansible Core Team
- Michael DeHaan
attributes:
action:
support: full
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix
safe_file_operations:
support: full
vault:
support: full
version_added: '2.2'
"""
EXAMPLES = r"""
- name: Copy file with owner and permissions
ansible.builtin.copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: '0644'
- name: Copy file with owner and permission, using symbolic representation
ansible.builtin.copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: u=rw,g=r,o=r
- name: Another symbolic mode example, adding some permissions and removing others
ansible.builtin.copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: u+rw,g-wx,o-rwx
- name: Copy a new "ntp.conf" file into place, backing up the original if it differs from the copied version
ansible.builtin.copy:
src: /mine/ntp.conf
dest: /etc/ntp.conf
owner: root
group: root
mode: '0644'
backup: yes
- name: Copy a new "sudoers" file into place, after passing validation with visudo
ansible.builtin.copy:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -csf %s
- name: Copy a "sudoers" file on the remote machine for editing
ansible.builtin.copy:
src: /etc/sudoers
dest: /etc/sudoers.edit
remote_src: yes
validate: /usr/sbin/visudo -csf %s
- name: Copy using inline content
ansible.builtin.copy:
content: '# This file was moved to /etc/other.conf'
dest: /etc/mine.conf
- name: If follow=yes, /path/to/file will be overwritten by contents of foo.conf
ansible.builtin.copy:
src: /etc/foo.conf
dest: /path/to/link # link to /path/to/file
follow: yes
- name: If follow=no, /path/to/link will become a file and be overwritten by contents of foo.conf
ansible.builtin.copy:
src: /etc/foo.conf
dest: /path/to/link # link to /path/to/file
follow: no
"""
RETURN = r"""
dest:
description: Destination file/path.
returned: success
type: str
sample: /path/to/file.txt
src:
description: Source file used for the copy on the target machine.
returned: changed
type: str
sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
md5sum:
description: MD5 checksum of the file after running copy.
returned: when supported
type: str
sample: 2a5aeecc61dc98c4d780b14b330e3282
checksum:
description: SHA1 checksum of the file after running copy.
returned: success
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
backup_file:
description: Name of backup file created.
returned: changed and if backup=yes
type: str
sample: /path/to/file.txt.2015-02-12@22:09~
gid:
description: Group id of the file, after execution.
returned: success
type: int
sample: 100
group:
description: Group of the file, after execution.
returned: success
type: str
sample: httpd
owner:
description: Owner of the file, after execution.
returned: success
type: str
sample: httpd
uid:
description: Owner id of the file, after execution.
returned: success
type: int
sample: 100
mode:
description: Permissions of the target, after execution.
returned: success
type: str
sample: '0644'
size:
description: Size of the target, after execution.
returned: success
type: int
sample: 1220
state:
description: State of the target, after execution.
returned: success
type: str
sample: file
"""
import errno
import filecmp
import grp
import os
import os.path
import pwd
import shutil
import stat
import tempfile
import traceback
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.basic import AnsibleModule
class AnsibleModuleError(Exception):
def __init__(self, results):
self.results = results
def split_pre_existing_dir(dirname):
"""
Return the first pre-existing directory and a list of the new directories that will be created.
"""
head, tail = os.path.split(dirname)
b_head = to_bytes(head, errors='surrogate_or_strict')
if head == '':
return ('.', [tail])
if not os.path.exists(b_head):
if head == '/':
raise AnsibleModuleError(results={'msg': "The '/' directory doesn't exist on this machine."})
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
else:
return (head, [tail])
new_directory_list.append(tail)
return (pre_existing_dir, new_directory_list)
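# Illustrative example (not in the original source): assuming only /srv exists,
# split_pre_existing_dir('/srv/a/b') returns ('/srv', ['a', 'b']), the deepest
# existing directory plus the chain of directories that still must be created.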
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
"""
Walk the new directories list and make sure that permissions are as we would expect
"""
if new_directory_list:
working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
directory_args['path'] = working_dir
changed = module.set_fs_attributes_if_different(directory_args, changed)
changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
return changed
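# Illustrative note: continuing the example above, the recursion applies
# directory_args first to /srv/a and then to /srv/a/b, accumulating 'changed'.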
def chown_recursive(path, module):
changed = False
owner = module.params['owner']
group = module.params['group']
if owner is not None:
if not module.check_mode:
for dirpath, dirnames, filenames in os.walk(path):
owner_changed = module.set_owner_if_different(dirpath, owner, False)
if owner_changed is True:
changed = owner_changed
for dir in [os.path.join(dirpath, d) for d in dirnames]:
owner_changed = module.set_owner_if_different(dir, owner, False)
if owner_changed is True:
changed = owner_changed
for file in [os.path.join(dirpath, f) for f in filenames]:
owner_changed = module.set_owner_if_different(file, owner, False)
if owner_changed is True:
changed = owner_changed
else:
uid = pwd.getpwnam(owner).pw_uid
for dirpath, dirnames, filenames in os.walk(path):
owner_changed = (os.stat(dirpath).st_uid != uid)
if owner_changed is True:
changed = owner_changed
for dir in [os.path.join(dirpath, d) for d in dirnames]:
owner_changed = (os.stat(dir).st_uid != uid)
if owner_changed is True:
changed = owner_changed
for file in [os.path.join(dirpath, f) for f in filenames]:
owner_changed = (os.stat(file).st_uid != uid)
if owner_changed is True:
changed = owner_changed
if group is not None:
if not module.check_mode:
for dirpath, dirnames, filenames in os.walk(path):
group_changed = module.set_group_if_different(dirpath, group, False)
if group_changed is True:
changed = group_changed
for dir in [os.path.join(dirpath, d) for d in dirnames]:
group_changed = module.set_group_if_different(dir, group, False)
if group_changed is True:
changed = group_changed
for file in [os.path.join(dirpath, f) for f in filenames]:
group_changed = module.set_group_if_different(file, group, False)
if group_changed is True:
changed = group_changed
else:
gid = grp.getgrnam(group).gr_gid
for dirpath, dirnames, filenames in os.walk(path):
group_changed = (os.stat(dirpath).st_gid != gid)
if group_changed is True:
changed = group_changed
for dir in [os.path.join(dirpath, d) for d in dirnames]:
group_changed = (os.stat(dir).st_gid != gid)
if group_changed is True:
changed = group_changed
for file in [os.path.join(dirpath, f) for f in filenames]:
group_changed = (os.stat(file).st_gid != gid)
if group_changed is True:
changed = group_changed
return changed
def copy_diff_files(src, dest, module):
"""Copy files that are different between `src` directory and `dest` directory."""
changed = False
owner = module.params['owner']
group = module.params['group']
local_follow = module.params['local_follow']
diff_files = filecmp.dircmp(src, dest).diff_files
if len(diff_files):
changed = True
if not module.check_mode:
for item in diff_files:
src_item_path = os.path.join(src, item)
dest_item_path = os.path.join(dest, item)
b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
if os.path.islink(b_src_item_path) and local_follow is False:
linkto = os.readlink(b_src_item_path)
os.symlink(linkto, b_dest_item_path)
else:
shutil.copyfile(b_src_item_path, b_dest_item_path)
shutil.copymode(b_src_item_path, b_dest_item_path)
if owner is not None:
module.set_owner_if_different(b_dest_item_path, owner, False)
if group is not None:
module.set_group_if_different(b_dest_item_path, group, False)
changed = True
return changed
def copy_left_only(src, dest, module):
"""Copy files that exist in `src` directory only to the `dest` directory."""
changed = False
owner = module.params['owner']
group = module.params['group']
local_follow = module.params['local_follow']
left_only = filecmp.dircmp(src, dest).left_only
if len(left_only):
changed = True
if not module.check_mode:
for item in left_only:
src_item_path = os.path.join(src, item)
dest_item_path = os.path.join(dest, item)
b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is True:
shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
chown_recursive(b_dest_item_path, module)
if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is False:
linkto = os.readlink(b_src_item_path)
os.symlink(linkto, b_dest_item_path)
if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is True:
shutil.copyfile(b_src_item_path, b_dest_item_path)
if owner is not None:
module.set_owner_if_different(b_dest_item_path, owner, False)
if group is not None:
module.set_group_if_different(b_dest_item_path, group, False)
if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is False:
linkto = os.readlink(b_src_item_path)
os.symlink(linkto, b_dest_item_path)
if not os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path):
shutil.copyfile(b_src_item_path, b_dest_item_path)
shutil.copymode(b_src_item_path, b_dest_item_path)
if owner is not None:
module.set_owner_if_different(b_dest_item_path, owner, False)
if group is not None:
module.set_group_if_different(b_dest_item_path, group, False)
if not os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path):
shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
chown_recursive(b_dest_item_path, module)
changed = True
return changed
def copy_common_dirs(src, dest, module):
changed = False
common_dirs = filecmp.dircmp(src, dest).common_dirs
for item in common_dirs:
src_item_path = os.path.join(src, item)
dest_item_path = os.path.join(dest, item)
b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
diff_files_changed = copy_diff_files(b_src_item_path, b_dest_item_path, module)
left_only_changed = copy_left_only(b_src_item_path, b_dest_item_path, module)
if diff_files_changed or left_only_changed:
changed = True
# recurse into subdirectory
changed = copy_common_dirs(os.path.join(src, item), os.path.join(dest, item), module) or changed
return changed
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=dict(
src=dict(type='path'),
_original_basename=dict(type='str'), # used to handle 'dest is a directory' via template, a slight hack
content=dict(type='str', no_log=True),
dest=dict(type='path', required=True),
backup=dict(type='bool', default=False),
force=dict(type='bool', default=True),
validate=dict(type='str'),
directory_mode=dict(type='raw'),
remote_src=dict(type='bool', default=False),
local_follow=dict(type='bool'),
checksum=dict(type='str'),
follow=dict(type='bool', default=False),
),
add_file_common_args=True,
supports_check_mode=True,
)
src = module.params['src']
b_src = to_bytes(src, errors='surrogate_or_strict')
dest = module.params['dest']
# Make sure we always have a directory component for later processing
if os.path.sep not in dest:
dest = '.{0}{1}'.format(os.path.sep, dest)
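# Illustrative note: a bare dest such as 'foo.conf' becomes './foo.conf' here,
# so the os.path.dirname() calls below always yield a usable directory component.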
b_dest = to_bytes(dest, errors='surrogate_or_strict')
backup = module.params['backup']
force = module.params['force']
_original_basename = module.params.get('_original_basename', None)
validate = module.params.get('validate', None)
follow = module.params['follow']
local_follow = module.params['local_follow']
mode = module.params['mode']
owner = module.params['owner']
group = module.params['group']
remote_src = module.params['remote_src']
checksum = module.params['checksum']
if not os.path.exists(b_src):
module.fail_json(msg="Source %s not found" % (src))
if not os.access(b_src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src))
# Preserve is usually handled in the action plugin but mode + remote_src has to be done on the
# remote host
if module.params['mode'] == 'preserve':
module.params['mode'] = '0%03o' % stat.S_IMODE(os.stat(b_src).st_mode)
mode = module.params['mode']
changed = False
checksum_dest = None
checksum_src = None
md5sum_src = None
if os.path.isfile(src):
try:
checksum_src = module.sha1(src)
except (OSError, IOError) as e:
module.warn("Unable to calculate src checksum, assuming change: %s" % to_native(e))
try:
# Backwards compat only. This will be None in FIPS mode
md5sum_src = module.md5(src)
except ValueError:
pass
elif remote_src and not os.path.isdir(src):
module.fail_json("Cannot copy invalid source '%s': not a file" % to_native(src))
if checksum and checksum_src != checksum:
module.fail_json(
msg='Copied file does not match the expected checksum. Transfer failed.',
checksum=checksum_src,
expected_checksum=checksum
)
# Special handling for recursive copy - create intermediate dirs
if dest.endswith(os.sep):
if _original_basename:
dest = os.path.join(dest, _original_basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
dirname = os.path.dirname(dest)
b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
if not os.path.exists(b_dirname):
try:
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
except AnsibleModuleError as e:
e.results['msg'] += ' Could not copy to {0}'.format(dest)
module.fail_json(**e.results)
if module.check_mode:
module.exit_json(msg='dest directory %s would be created' % dirname, changed=True, src=src)
os.makedirs(b_dirname)
changed = True
directory_args = module.load_file_common_arguments(module.params)
directory_mode = module.params["directory_mode"]
if directory_mode is not None:
directory_args['mode'] = directory_mode
else:
directory_args['mode'] = None
adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
if os.path.isdir(b_dest):
basename = os.path.basename(src)
if _original_basename:
basename = _original_basename
dest = os.path.join(dest, basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
if os.path.islink(b_dest) and follow:
b_dest = os.path.realpath(b_dest)
dest = to_native(b_dest, errors='surrogate_or_strict')
if not force:
module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
if os.access(b_dest, os.R_OK) and os.path.isfile(b_dest):
checksum_dest = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(b_dest)):
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
os.stat(os.path.dirname(b_dest))
except OSError as e:
if "permission denied" in to_native(e).lower():
module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
if not os.access(os.path.dirname(b_dest), os.W_OK) and not module.params['unsafe_writes']:
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None
if checksum_src != checksum_dest or os.path.islink(b_dest):
if not module.check_mode:
try:
if backup:
if os.path.exists(b_dest):
backup_file = module.backup_local(dest)
# allow for conversion from symlink.
if os.path.islink(b_dest):
os.unlink(b_dest)
open(b_dest, 'w').close()
if validate:
# if we have a mode, make sure we set it on the temporary
# file source as some validations may require it
if mode is not None:
module.set_mode_if_different(src, mode, False)
if owner is not None:
module.set_owner_if_different(src, owner, False)
if group is not None:
module.set_group_if_different(src, group, False)
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % src)
if rc != 0:
module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
b_mysrc = b_src
if remote_src and os.path.isfile(b_src):
dummy, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
shutil.copyfile(b_src, b_mysrc)
try:
shutil.copystat(b_src, b_mysrc)
except OSError as err:
if err.errno == errno.ENOSYS and mode == "preserve":
module.warn("Unable to copy stats {0}".format(to_native(b_src)))
else:
raise
# at this point we should always have tmp file
module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'], keep_dest_attrs=not remote_src)
except (IOError, OSError):
module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
changed = True
# If neither has a checksum, both src and dest are directories.
if checksum_src is None and checksum_dest is None:
if remote_src and os.path.isdir(module.params['src']):
b_src = to_bytes(module.params['src'], errors='surrogate_or_strict')
b_dest = to_bytes(module.params['dest'], errors='surrogate_or_strict')
if src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
diff_files_changed = copy_diff_files(b_src, b_dest, module)
left_only_changed = copy_left_only(b_src, b_dest, module)
common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
owner_group_changed = chown_recursive(b_dest, module)
if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
changed = True
if src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
if not module.check_mode:
shutil.copytree(b_src, b_dest, symlinks=not local_follow)
chown_recursive(dest, module)
changed = True
if not src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
if not module.check_mode and not os.path.exists(b_dest):
shutil.copytree(b_src, b_dest, symlinks=not local_follow)
changed = True
chown_recursive(dest, module)
if module.check_mode and not os.path.exists(b_dest):
changed = True
if os.path.exists(b_dest):
diff_files_changed = copy_diff_files(b_src, b_dest, module)
left_only_changed = copy_left_only(b_src, b_dest, module)
common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
owner_group_changed = chown_recursive(b_dest, module)
if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
changed = True
if not src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
b_basename = to_bytes(os.path.basename(module.params['src']), errors='surrogate_or_strict')
b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
if not module.check_mode and not os.path.exists(b_dest):
os.makedirs(b_dest)
changed = True
b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
diff_files_changed = copy_diff_files(b_src, b_dest, module)
left_only_changed = copy_left_only(b_src, b_dest, module)
common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
owner_group_changed = chown_recursive(b_dest, module)
if module.check_mode and not os.path.exists(b_dest):
changed = True
res_args = dict(
dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
)
if backup_file:
res_args['backup_file'] = backup_file
file_args = module.load_file_common_arguments(module.params, path=dest)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args)
if __name__ == '__main__':
main()
# ---- file: ansible_ansible/lib/ansible/modules/assert.py ----
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: assert
short_description: Asserts given expressions are true
description:
- This module asserts that given expressions are true with an optional custom message.
- This module is also supported for Windows targets.
version_added: "1.5"
options:
that:
description:
- A list of string expressions of the same form that can be passed to the C(when) statement.
type: list
elements: str
required: true
fail_msg:
description:
- The customized message used for a failing assertion.
- This argument was called C(msg) before Ansible 2.7; now it is renamed to O(fail_msg) with the alias O(msg).
type: str
aliases: [ msg ]
version_added: "2.7"
success_msg:
description:
- The customized message used for a successful assertion.
type: str
version_added: "2.7"
quiet:
description:
- Set this to V(true) to avoid verbose output.
type: bool
default: no
version_added: "2.8"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
become:
support: none
bypass_host_loop:
support: none
connection:
support: none
check_mode:
support: full
delegation:
support: none
details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
diff_mode:
support: none
platform:
platforms: all
seealso:
- module: ansible.builtin.debug
- module: ansible.builtin.fail
- module: ansible.builtin.meta
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
- name: A single condition can be supplied as string instead of list
ansible.builtin.assert:
that: "ansible_os_family != 'RedHat'"
- name: Use yaml multiline strings to ease escaping
ansible.builtin.assert:
that:
- "'foo' in some_command_result.stdout"
- number_of_the_counting == 3
- >
"reject" not in some_command_result.stderr
- name: After version 2.7 both O(msg) and O(fail_msg) can customize the failing assertion message
ansible.builtin.assert:
that:
- my_param <= 100
- my_param >= 0
fail_msg: "'my_param' must be between 0 and 100"
success_msg: "'my_param' is between 0 and 100"
- name: Please use msg when the Ansible version is older than 2.7
ansible.builtin.assert:
that:
- my_param <= 100
- my_param >= 0
msg: "'my_param' must be between 0 and 100"
- name: Use quiet to avoid verbose output
ansible.builtin.assert:
that:
- my_param <= 100
- my_param >= 0
quiet: true
"""
# ---- file: ansible_ansible/lib/ansible/modules/add_host.py ----
# -*- mode: python -*-
# Copyright: (c) 2012, Seth Vidal (@skvidal)
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: add_host
short_description: Add a host (and alternatively a group) to the ansible-playbook in-memory inventory
description:
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
- Takes variables so you can define the new hosts more fully.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
name:
description:
- The hostname/ip of the host to add to the inventory, can include a colon and a port number.
type: str
required: true
aliases: [ host, hostname ]
groups:
description:
- The groups to add the hostname to.
type: list
elements: str
aliases: [ group, groupname ]
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
attributes:
action:
support: full
core:
details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
support: partial
become:
support: none
bypass_host_loop:
support: full
bypass_task_loop:
support: none
check_mode:
details: While this makes no changes to target systems the 'in memory' inventory will still be altered
support: partial
connection:
support: none
delegation:
support: none
diff_mode:
support: none
platform:
platforms: all
notes:
- The alias O(host) of the parameter O(name) is only available on Ansible 2.4 and newer.
- Since Ansible 2.4, the C(inventory_dir) variable is now set to V(None) instead of the 'global inventory source',
because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
- Though this module does not change the remote host, we do provide C(changed) status as it can be useful for those trying to track inventory changes.
- The hosts added will not bypass the C(--limit) from the command line, so both of those need to be in agreement to make them available as play targets.
They are still available from hostvars and for delegation as a normal part of the inventory.
seealso:
- module: ansible.builtin.group_by
author:
- Ansible Core Team
- Seth Vidal (@skvidal)
"""
EXAMPLES = r"""
- name: Add host to group 'just_created' with variable foo=42
ansible.builtin.add_host:
name: '{{ ip_from_ec2 }}'
groups: just_created
foo: 42
- name: Add host to multiple groups
ansible.builtin.add_host:
hostname: '{{ new_ip }}'
groups:
- group1
- group2
- name: Add a host with a non-standard port local to your machines
ansible.builtin.add_host:
name: '{{ new_ip }}:{{ new_port }}'
- name: Add a host alias that we reach through a tunnel (Ansible 1.9 and older)
ansible.builtin.add_host:
hostname: '{{ new_ip }}'
ansible_ssh_host: '{{ inventory_hostname }}'
ansible_ssh_port: '{{ new_port }}'
- name: Add a host alias that we reach through a tunnel (Ansible 2.0 and newer)
ansible.builtin.add_host:
hostname: '{{ new_ip }}'
ansible_host: '{{ inventory_hostname }}'
ansible_port: '{{ new_port }}'
- name: Ensure inventory vars are set to the same value as the inventory_hostname has (close to pre Ansible 2.4 behaviour)
ansible.builtin.add_host:
hostname: charlie
inventory_dir: '{{ inventory_dir }}'
- name: Add all hosts running this playbook to the done group
ansible.builtin.add_host:
name: '{{ item }}'
groups: done
loop: "{{ ansible_play_hosts }}"
"""
# ---- file: ansible_ansible/lib/ansible/modules/package.py ----
# -*- coding: utf-8 -*-
# (c) 2015, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: package
version_added: 2.0
author:
- Ansible Core Team
short_description: Generic OS package manager
description:
- This module manages packages on a target without specifying a package manager module (like M(ansible.builtin.dnf), M(ansible.builtin.apt), ...).
It is convenient to use in a heterogeneous environment of machines without having to create a specific task for
each package manager. M(ansible.builtin.package) calls behind the scenes the module for the package manager used by the operating system
discovered by the module M(ansible.builtin.setup). If M(ansible.builtin.setup) was not yet run, M(ansible.builtin.package) will run it.
- This module acts as a proxy to the underlying package manager module. While all arguments will be passed to the
underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
of module arguments that all packaging modules support.
- For Windows targets, use the M(ansible.windows.win_package) module instead.
options:
name:
description:
- Package name, or package specifier with version.
- Syntax varies with package manager. For example V(name-1.0) or V(name=1.0).
- Package names also vary with package manager; this module will not "translate" them per distribution. For example V(libyaml-dev), V(libyaml-devel).
- To operate on several packages this can accept a comma separated string of packages or a list of packages, depending on the underlying package manager.
required: true
state:
description:
- Whether to install (V(present)), or remove (V(absent)) a package.
- You can use other states like V(latest) ONLY if they are supported by the underlying package module(s) executed.
required: true
use:
description:
- The required package manager module to use (V(dnf), V(apt), and so on). The default V(auto) will use existing facts or try to auto-detect it.
- You should only use this field if the automatic selection is not working for some reason.
- Since version 2.17 you can use the C(ansible_package_use) variable to override the automatic detection, but this option still takes precedence.
default: auto
requirements:
  - Whatever is required for the package plugins specific to each system.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: full
bypass_host_loop:
support: none
check_mode:
details: support depends on the underlying plugin invoked
support: N/A
diff_mode:
details: support depends on the underlying plugin invoked
support: N/A
platform:
details: The support depends on the availability for the specific plugin for each platform and if fact gathering is able to detect it
platforms: all
notes:
  - While M(ansible.builtin.package) abstracts package managers to ease dealing with multiple distributions, package names often differ for the same software.
"""
EXAMPLES = """
- name: Install ntpdate
ansible.builtin.package:
name: ntpdate
state: present
# This uses a variable as this changes per distribution.
- name: Remove the apache package
ansible.builtin.package:
name: "{{ apache }}"
state: absent
- name: Install the latest version of Apache and MariaDB
ansible.builtin.package:
name:
- httpd
- mariadb-server
state: latest
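# A hedged sketch, not from the original docs: O(use) can pin the backend
# module when auto-detection is not desired (the value shown assumes a
# dnf-based host).
- name: Install ntpdate using an explicitly chosen backend
  ansible.builtin.package:
    name: ntpdate
    state: present
    use: dnf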
"""
| 3,748
|
Python
|
.py
| 81
| 41.728395
| 160
| 0.738798
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,169
|
import_playbook.py
|
ansible_ansible/lib/ansible/modules/import_playbook.py
|
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
author: Ansible Core Team (@ansible)
module: import_playbook
short_description: Import a playbook
description:
- Includes a file with a list of plays to be executed.
- Files with a list of plays can only be included at the top level.
- You cannot use this action inside a play.
version_added: "2.4"
options:
free-form:
description:
- The name of the imported playbook is specified directly without any other option.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
- action_core.import
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: all
notes:
- This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
seealso:
- module: ansible.builtin.import_role
- module: ansible.builtin.import_tasks
- module: ansible.builtin.include_role
- module: ansible.builtin.include_tasks
- ref: playbooks_reuse
description: More information related to including and importing playbooks, roles and tasks.
"""
EXAMPLES = r"""
- hosts: localhost
tasks:
- ansible.builtin.debug:
msg: play1
- name: Include a play after another play
ansible.builtin.import_playbook: otherplays.yaml
- name: Set variables on an imported playbook
ansible.builtin.import_playbook: otherplays.yml
vars:
service: httpd
- name: Include a playbook from a collection
ansible.builtin.import_playbook: my_namespace.my_collection.my_playbook
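# A hedged sketch (file and variable names assumed): a conditional on an
# import is applied to every play inside the imported file, not to the
# import statement itself.
- name: Import a playbook only when a flag is set
  ansible.builtin.import_playbook: otherplays.yaml
  when: run_other_plays | default(false) | bool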
- name: This DOES NOT WORK
hosts: all
tasks:
- ansible.builtin.debug:
msg: task1
- name: This fails because I'm inside a play already
ansible.builtin.import_playbook: stuff.yaml
"""
RETURN = r"""
# This module does not return anything except plays to execute.
"""
| 2,056
|
Python
|
.py
| 65
| 28.615385
| 100
| 0.74798
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,170
|
script.py
|
ansible_ansible/lib/ansible/modules/script.py
|
# Copyright: (c) 2012, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: script
version_added: "0.9"
short_description: Runs a local script on a remote node after transferring it
description:
- The M(ansible.builtin.script) module takes the script name followed by a list of space-delimited arguments.
- Either a free-form command or O(cmd) parameter is required, see the examples.
- The local script at the path will be transferred to the remote node and then executed.
- The given script will be processed through the shell environment on the remote node.
- This module does not require Python on the remote system, much like the M(ansible.builtin.raw) module.
- This module is also supported for Windows targets.
options:
free_form:
description:
- Path to the local script file followed by optional arguments.
type: str
cmd:
type: str
description:
- Path to the local script to run followed by optional arguments.
creates:
description:
- A filename on the remote node, when it already exists, this step will B(not) be run.
version_added: "1.5"
type: str
removes:
description:
- A filename on the remote node, when it does not exist, this step will B(not) be run.
version_added: "1.5"
type: str
chdir:
description:
- Change into this directory on the remote node before running the script.
version_added: "2.4"
type: str
executable:
description:
- Name or path of an executable to invoke the script with.
version_added: "2.6"
type: str
notes:
- It is usually preferable to write Ansible modules rather than pushing scripts. Convert your script to an Ansible module for bonus points!
- The P(ansible.builtin.ssh#connection) connection plugin will force pseudo-tty allocation via C(-tt) when scripts are executed.
Pseudo-ttys do not have a stderr channel and all stderr is sent to stdout. If you depend on separated stdout and stderr result keys,
please switch to a set of tasks that comprises M(ansible.builtin.copy) with M(ansible.builtin.command) instead of using M(ansible.builtin.script).
- If the path to the local script contains spaces, it needs to be quoted.
- This module is also supported for Windows targets.
- If the script returns non-UTF-8 data, it must be encoded to avoid issues. One option is to pipe
the output through C(base64).
seealso:
- module: ansible.builtin.shell
- module: ansible.windows.win_shell
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.files
- action_common_attributes.raw
- decrypt
attributes:
check_mode:
support: partial
    details: while the script itself is arbitrary and cannot be subject to the check mode semantics, it adds O(creates)/O(removes) options as a workaround
diff_mode:
support: none
platform:
details: This action is one of the few that requires no Python on the remote as it passes the command directly into the connection string
platforms: all
raw:
support: full
safe_file_operations:
support: none
vault:
support: full
"""
EXAMPLES = r"""
- name: Run a script with arguments (free form)
ansible.builtin.script: /some/local/script.sh --some-argument 1234
- name: Run a script with arguments (using 'cmd' parameter)
ansible.builtin.script:
cmd: /some/local/script.sh --some-argument 1234
- name: Run a script only if file.txt does not exist on the remote node
ansible.builtin.script: /some/local/create_file.sh --some-argument 1234
args:
creates: /the/created/file.txt
- name: Run a script only if file.txt exists on the remote node
ansible.builtin.script: /some/local/remove_file.sh --some-argument 1234
args:
removes: /the/removed/file.txt
- name: Run a script using an executable in a non-system path
ansible.builtin.script: /some/local/script
args:
executable: /some/remote/executable
- name: Run a script using an executable in a system path
ansible.builtin.script: /some/local/script.py
args:
executable: python3
- name: Run a Powershell script on a Windows host
script: subdirectories/under/path/with/your/playbook/script.ps1
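# A hedged sketch (paths are assumptions): O(cmd) and O(chdir) combine, so the
# script runs from a known remote working directory.
- name: Run a script from a specific remote working directory
  ansible.builtin.script:
    cmd: /some/local/script.sh --some-argument 1234
    chdir: /tmp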
"""
| 4,410
|
Python
|
.py
| 105
| 38.066667
| 157
| 0.741327
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,171
|
set_stats.py
|
ansible_ansible/lib/ansible/modules/set_stats.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible RedHat, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: set_stats
short_description: Define and display stats for the current ansible run
description:
- This module allows setting/accumulating stats on the current ansible run, either per host or for all hosts in the run.
- This module is also supported for Windows targets.
author: Brian Coca (@bcoca)
options:
data:
description:
      - A dictionary in which each key represents a stat (or variable) you want to keep track of.
type: dict
required: true
per_host:
description:
- Whether the stats are per host or for all hosts in the run.
type: bool
default: no
aggregate:
description:
      - Whether the provided value is aggregated with the existing stat (V(true)) or replaces it (V(false)).
type: bool
default: yes
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
attributes:
action:
    details: While the action plugin does do some of the work, it relies on the core engine to actually create the variables; that part cannot be overridden
support: partial
bypass_host_loop:
support: none
bypass_task_loop:
support: none
core:
details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
support: partial
check_mode:
support: full
delegation:
support: none
diff_mode:
support: none
notes:
- In order for custom stats to be displayed, you must set C(show_custom_stats) in section C([defaults]) in C(ansible.cfg)
or by defining environment variable C(ANSIBLE_SHOW_CUSTOM_STATS) to V(true). See the P(ansible.builtin.default#callback) callback plugin for details.
version_added: "2.3"
"""
EXAMPLES = r"""
- name: Aggregating packages_installed stat per host
ansible.builtin.set_stats:
data:
packages_installed: 31
per_host: yes
- name: Aggregating random stats for all hosts using complex arguments
ansible.builtin.set_stats:
data:
one_stat: 11
other_stat: "{{ local_var * 2 }}"
another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
per_host: no
- name: Setting stats (not aggregating)
ansible.builtin.set_stats:
data:
the_answer: 42
aggregate: no
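# A hedged sketch, not from the original docs: O(per_host) and O(aggregate)
# combine, giving each host its own running total across plays.
- name: Keep a per-host running total
  ansible.builtin.set_stats:
    data:
      failures_seen: 1
    per_host: yes
    aggregate: yes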
"""
| 2,641
|
Python
|
.py
| 74
| 31.081081
| 159
| 0.710547
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,172
|
uri.py
|
ansible_ansible/lib/ansible/modules/uri.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Romeo Theriault <romeot () hawaii.edu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
- For Windows targets, use the M(ansible.windows.win_uri) module instead.
version_added: "1.1"
options:
ciphers:
description:
- SSL/TLS Ciphers to use for the request.
- 'When a list is provided, all ciphers are joined in order with V(:)'
- See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
for more details.
      - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions.
type: list
elements: str
version_added: '2.14'
decompress:
description:
- Whether to attempt to decompress gzip content-encoded responses.
type: bool
default: true
version_added: '2.14'
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path.
type: str
required: true
dest:
description:
      - A path to where the file will be downloaded (if desired). If O(dest) is a
        directory, the basename of the file on the remote server will be used.
type: path
url_username:
description:
- A username for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ user ]
url_password:
description:
- A password for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ password ]
body:
description:
- The body of the http request/response to the web service. If O(body_format) is set
to V(json) it will take an already formatted JSON string or convert a data structure
into JSON.
- If O(body_format) is set to V(form-urlencoded) it will convert a dictionary
or list of tuples into an C(application/x-www-form-urlencoded) string. (Added in v2.7)
- If O(body_format) is set to V(form-multipart) it will convert a dictionary
into C(multipart/form-multipart) body. (Added in v2.10)
type: raw
body_format:
description:
- The serialization format of the body. When set to V(json), V(form-multipart), or V(form-urlencoded), encodes
the body argument, if needed, and automatically sets the C(Content-Type) header accordingly.
- As of v2.3 it is possible to override the C(Content-Type) header, when
set to V(json) or V(form-urlencoded) via the O(headers) option.
- The C(Content-Type) header cannot be overridden when using V(form-multipart).
- V(form-urlencoded) was added in v2.7.
- V(form-multipart) was added in v2.10.
type: str
choices: [ form-urlencoded, json, raw, form-multipart ]
default: raw
version_added: "2.0"
method:
description:
- The HTTP method of the request or response.
      - In more recent versions we do not restrict the method at the module level anymore,
        but it still must be a valid method accepted by the service handling the request.
type: str
default: GET
return_content:
description:
      - Whether or not to return the body of the response as a "content" key in
        the dictionary result, regardless of whether the request succeeded or failed.
- Independently of this option, if the reported C(Content-Type) is C(application/json), then the JSON is
always loaded into a key called RV(ignore:json) in the dictionary results.
type: bool
default: no
force_basic_auth:
description:
- Force the sending of the Basic authentication header upon initial request.
- When this setting is V(false), this module will first try an unauthenticated request, and when the server replies
with an C(HTTP 401) error, it will submit the Basic authentication header.
- When this setting is V(true), this module will immediately send a Basic authentication header on the first
request.
- "Use this setting in any of the following scenarios:"
- You know the webservice endpoint always requires HTTP Basic authentication, and you want to speed up your
requests by eliminating the first roundtrip.
- The web service does not properly send an HTTP 401 error to your client, so Ansible's HTTP library will not
properly respond with HTTP credentials, and logins will fail.
- The webservice bans or rate-limits clients that cause any HTTP 401 errors.
type: bool
default: no
follow_redirects:
description:
- Whether or not the URI module should follow redirects.
type: str
default: safe
choices:
all: Will follow all redirects.
none: Will not follow any redirects.
safe: Only redirects doing GET or HEAD requests will be followed.
      urllib2: Defer to urllib2 behavior (as of writing, this follows HTTP redirects).
'no': (DEPRECATED, removed in 2.22) alias of V(none).
'yes': (DEPRECATED, removed in 2.22) alias of V(all).
creates:
description:
- A filename, when it already exists, this step will not be run.
type: path
removes:
description:
- A filename, when it does not exist, this step will not be run.
type: path
status_code:
description:
- A list of valid, numeric, HTTP status codes that signifies success of the request.
type: list
elements: int
default: [ 200 ]
timeout:
description:
      - The socket-level timeout in seconds.
type: int
default: 30
headers:
description:
- Add custom HTTP headers to a request in the format of a YAML hash. As
of Ansible 2.3 supplying C(Content-Type) here will override the header
generated by supplying V(json) or V(form-urlencoded) for O(body_format).
type: dict
default: {}
version_added: '2.1'
validate_certs:
description:
- If V(false), SSL certificates will not be validated.
      - This should only be set to V(false) on personally controlled sites using self-signed certificates.
- Prior to 1.9.2 the code defaulted to V(false).
type: bool
default: true
version_added: '1.9.2'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, O(client_key) is not required.
type: path
version_added: '2.4'
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If O(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '2.4'
ca_path:
description:
- PEM formatted file that contains a CA certificate to be used for validation.
type: path
version_added: '2.11'
src:
description:
- Path to file to be submitted to the remote server.
- Cannot be used with O(body).
- Should be used with O(force_basic_auth) to ensure success when the remote end sends a 401.
type: path
version_added: '2.7'
remote_src:
description:
- If V(false), the module will search for the O(src) on the controller node.
- If V(true), the module will search for the O(src) on the managed (remote) node.
type: bool
default: no
version_added: '2.7'
force:
description:
      - If V(true), do not get a cached copy.
type: bool
default: no
use_proxy:
description:
- If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: true
unix_socket:
description:
- Path to Unix domain socket to use for connection.
type: path
version_added: '2.8'
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
unredirected_headers:
description:
      - A list of header names that will not be sent on subsequent redirected requests. This list is case
        insensitive. By default all headers will be sent on redirected requests. In some cases it may be beneficial to list
        headers such as C(Authorization) here to avoid potential credential exposure.
default: []
type: list
elements: str
version_added: '2.12'
use_gssapi:
description:
- Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with O(url_username)/O(url_password) or with the GSSAPI env var
        C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
- NTLM authentication is B(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
use_netrc:
description:
      - Determines whether to use credentials from the C(~/.netrc) file.
- By default C(.netrc) is used with Basic authentication headers.
- When V(false), C(.netrc) credentials are ignored.
type: bool
default: true
version_added: '2.14'
extends_documentation_fragment:
- action_common_attributes
- files
attributes:
check_mode:
support: none
diff_mode:
support: none
platform:
platforms: posix
notes:
- The dependency on httplib2 was removed in Ansible 2.1.
- The module returns all the HTTP headers in lower-case.
- For Windows targets, use the M(ansible.windows.win_uri) module instead.
seealso:
- module: ansible.builtin.get_url
- module: ansible.windows.win_uri
author:
- Romeo Theriault (@romeotheriault)
"""
EXAMPLES = r"""
- name: Check that you can connect (GET) to a page and it returns a status 200
ansible.builtin.uri:
url: http://www.example.com
- name: Check that a page returns successfully but fail if the word AWESOME is not in the page contents
ansible.builtin.uri:
url: http://www.example.com
return_content: true
register: this
failed_when: this is failed or "'AWESOME' not in this.content"
- name: Create a JIRA issue
ansible.builtin.uri:
url: https://your.jira.example.com/rest/api/2/issue/
user: your_username
password: your_pass
method: POST
body: "{{ lookup('ansible.builtin.file','issue.json') }}"
force_basic_auth: true
status_code: 201
body_format: json
- name: Login to a form based webpage, then use the returned cookie to access the app in later tasks
ansible.builtin.uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
name: your_username
password: your_password
enter: Sign in
status_code: 302
register: login
- name: Login to a form based webpage using a list of tuples
ansible.builtin.uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
- [ name, your_username ]
- [ password, your_password ]
- [ enter, Sign in ]
status_code: 302
register: login
- name: Upload a file via multipart/form-multipart
ansible.builtin.uri:
url: https://httpbin.org/post
method: POST
body_format: form-multipart
body:
file1:
filename: /bin/true
mime_type: application/octet-stream
file2:
content: text based file content
filename: fake.txt
mime_type: text/plain
text_form_field: value
- name: Connect to website using a previously stored cookie
ansible.builtin.uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: true
headers:
Cookie: "{{ login.cookies_string }}"
- name: Queue build of a project in Jenkins
ansible.builtin.uri:
url: http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
method: GET
force_basic_auth: true
status_code: 201
- name: POST from contents of local file
ansible.builtin.uri:
url: https://httpbin.org/post
method: POST
src: file.json
- name: POST from contents of remote file
ansible.builtin.uri:
url: https://httpbin.org/post
method: POST
src: /path/to/my/file.json
remote_src: true
- name: Create workspaces in Log analytics Azure
ansible.builtin.uri:
url: https://www.mms.microsoft.com/Embedded/Api/ConfigDataSources/LogManagementData/Save
method: POST
body_format: json
status_code: [200, 202]
return_content: true
headers:
Content-Type: application/json
x-ms-client-workspace-path: /subscriptions/{{ sub_id }}/resourcegroups/{{ res_group }}/providers/microsoft.operationalinsights/workspaces/{{ w_spaces }}
x-ms-client-platform: ibiza
x-ms-client-auth-token: "{{ token_az }}"
body:
- name: Pause play until a URL is reachable from this host
ansible.builtin.uri:
url: "http://192.0.2.1/some/test"
follow_redirects: none
method: GET
register: _result
until: _result.status == 200
retries: 720 # 720 * 5 seconds = 1hour (60*60/5)
delay: 5 # Every 5 seconds
- name: Provide SSL/TLS ciphers as a list
uri:
url: https://example.org
ciphers:
- '@SECLEVEL=2'
- ECDH+AESGCM
- ECDH+CHACHA20
- ECDH+AES
- DHE+AES
- '!aNULL'
- '!eNULL'
- '!aDSS'
- '!SHA1'
- '!AESCCM'
- name: Provide SSL/TLS ciphers as an OpenSSL formatted cipher list
uri:
url: https://example.org
ciphers: '@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM'
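# A hedged sketch (socket path, API path and token variable are assumptions):
# requests can go over a Unix domain socket, and O(unredirected_headers)
# keeps the Authorization header off any redirected request.
- name: Query a local daemon over its Unix socket
  ansible.builtin.uri:
    url: http://localhost/v1.41/containers/json
    unix_socket: /var/run/docker.sock
    headers:
      Authorization: "Bearer {{ api_token }}"
    unredirected_headers:
      - Authorization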
"""
RETURN = r"""
# The return information includes all the HTTP headers in lower-case.
content:
description: The response body content.
returned: status not in status_code or return_content is true
type: str
sample: "{}"
cookies:
description: The cookie values placed in cookie jar.
returned: on success
type: dict
sample: {"SESSIONID": "[SESSIONID]"}
version_added: "2.4"
cookies_string:
description: The value for future request Cookie headers.
returned: on success
type: str
sample: "SESSIONID=[SESSIONID]"
version_added: "2.6"
elapsed:
description: The number of seconds that elapsed while performing the download.
returned: on success
type: int
sample: 23
msg:
description: The HTTP message from the request.
returned: always
type: str
sample: OK (unknown bytes)
path:
  description: Destination file/path.
returned: dest is defined
type: str
sample: /path/to/file.txt
redirected:
description: Whether the request was redirected.
returned: on success
type: bool
sample: false
status:
description: The HTTP status code from the request.
returned: always
type: int
sample: 200
url:
description: The actual URL used for the request.
returned: always
type: str
sample: https://www.ansible.com/
"""
import json
import os
import re
import shutil
import tempfile
from ansible.module_utils.basic import AnsibleModule, sanitize_keys
from ansible.module_utils.six import binary_type, iteritems, string_types
from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.compat.datetime import utcnow, utcfromtimestamp
from ansible.module_utils.six.moves.collections_abc import Mapping, Sequence
from ansible.module_utils.urls import fetch_url, get_response_filename, parse_content_type, prepare_multipart, url_argument_spec
JSON_CANDIDATES = {'json', 'javascript'}
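# e.g. 'application/json', 'text/javascript', and RFC 6839 suffixed types like
# 'application/ld+json' are treated as JSON candidates further below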
# List of response key names we do not want sanitize_keys() to change.
NO_MODIFY_KEYS = frozenset(
('msg', 'exception', 'warnings', 'deprecations', 'failed', 'skipped',
'changed', 'rc', 'stdout', 'stderr', 'elapsed', 'path', 'location',
'content_type')
)
def format_message(err, resp):
msg = resp.pop('msg')
return err + (' %s' % msg if msg else '')
def write_file(module, dest, content, resp):
"""
Create temp file and write content to dest file only if content changed
"""
tmpsrc = None
try:
fd, tmpsrc = tempfile.mkstemp(dir=module.tmpdir)
with os.fdopen(fd, 'wb') as f:
if isinstance(content, binary_type):
f.write(content)
else:
shutil.copyfileobj(content, f)
except Exception as e:
if tmpsrc and os.path.exists(tmpsrc):
os.remove(tmpsrc)
msg = format_message("Failed to create temporary content file: %s" % to_native(e), resp)
module.fail_json(msg=msg, **resp)
checksum_src = module.sha1(tmpsrc)
checksum_dest = module.sha1(dest)
if checksum_src != checksum_dest:
try:
module.atomic_move(tmpsrc, dest)
except Exception as e:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
msg = format_message("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), resp)
module.fail_json(msg=msg, **resp)
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
def absolute_location(url, location):
"""Attempts to create an absolute URL based on initial URL, and
next URL, specifically in the case of a ``Location`` header.
"""
if '://' in location:
return location
elif location.startswith('/'):
parts = urlsplit(url)
base = url.replace(parts[2], '')
return '%s%s' % (base, location)
elif not location.startswith('/'):
base = os.path.dirname(url)
return '%s/%s' % (base, location)
else:
return location
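# Illustrative behaviour (hypothetical values):
#   absolute_location('https://host/a/b', '/c') -> 'https://host/c'
#   absolute_location('https://host/a/b', 'c')  -> 'https://host/a/c'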
def kv_list(data):
""" Convert data into a list of key-value tuples """
if data is None:
return None
if isinstance(data, Sequence):
return list(data)
if isinstance(data, Mapping):
return list(data.items())
raise TypeError('cannot form-urlencode body, expect list or dict')
def form_urlencoded(body):
""" Convert data into a form-urlencoded string """
if isinstance(body, string_types):
return body
if isinstance(body, (Mapping, Sequence)):
result = []
# Turn a list of lists into a list of tuples that urlencode accepts
for key, values in kv_list(body):
if isinstance(values, string_types) or not isinstance(values, (Mapping, Sequence)):
values = [values]
for value in values:
if value is not None:
result.append((to_text(key), to_text(value)))
return urlencode(result, doseq=True)
return body
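# Illustrative behaviour (hypothetical value): a mapping with a list value
# expands into repeated keys, e.g. {'a': 1, 'b': [2, 3]} -> 'a=1&b=2&b=3'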
def uri(module, url, dest, body, body_format, method, headers, socket_timeout, ca_path, unredirected_headers, decompress,
ciphers, use_netrc):
    # if dest is set and is a directory, let's check if we get redirected and
    # set the filename from that url
src = module.params['src']
if src:
try:
headers.update({
'Content-Length': os.stat(src).st_size
})
data = open(src, 'rb')
except OSError:
module.fail_json(msg='Unable to open source file %s' % src, elapsed=0)
else:
data = body
kwargs = {}
if dest is not None and os.path.isfile(dest):
        # if the destination file already exists, only download if the remote file is newer
kwargs['last_mod_time'] = utcfromtimestamp(os.path.getmtime(dest))
if module.params.get('follow_redirects') in ('no', 'yes'):
module.deprecate(
"Using 'yes' or 'no' for 'follow_redirects' parameter is deprecated.",
version='2.22'
)
resp, info = fetch_url(module, url, data=data, headers=headers,
method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
ca_path=ca_path, unredirected_headers=unredirected_headers,
use_proxy=module.params['use_proxy'], decompress=decompress,
ciphers=ciphers, use_netrc=use_netrc, force=module.params['force'], **kwargs)
if src:
# Try to close the open file handle
try:
data.close()
except Exception:
pass
return resp, info
def main():
argument_spec = url_argument_spec()
argument_spec.update(
dest=dict(type='path'),
url_username=dict(type='str', aliases=['user']),
url_password=dict(type='str', aliases=['password'], no_log=True),
body=dict(type='raw'),
body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw', 'form-multipart']),
src=dict(type='path'),
method=dict(type='str', default='GET'),
return_content=dict(type='bool', default=False),
follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
creates=dict(type='path'),
removes=dict(type='path'),
status_code=dict(type='list', elements='int', default=[200]),
timeout=dict(type='int', default=30),
headers=dict(type='dict', default={}),
unix_socket=dict(type='path'),
remote_src=dict(type='bool', default=False),
ca_path=dict(type='path', default=None),
unredirected_headers=dict(type='list', elements='str', default=[]),
decompress=dict(type='bool', default=True),
ciphers=dict(type='list', elements='str'),
use_netrc=dict(type='bool', default=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
mutually_exclusive=[['body', 'src']],
)
url = module.params['url']
body = module.params['body']
body_format = module.params['body_format'].lower()
method = module.params['method'].upper()
dest = module.params['dest']
return_content = module.params['return_content']
creates = module.params['creates']
removes = module.params['removes']
status_code = [int(x) for x in list(module.params['status_code'])]
socket_timeout = module.params['timeout']
ca_path = module.params['ca_path']
dict_headers = module.params['headers']
unredirected_headers = module.params['unredirected_headers']
decompress = module.params['decompress']
ciphers = module.params['ciphers']
use_netrc = module.params['use_netrc']
if not re.match('^[A-Z]+$', method):
module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")
if body_format == 'json':
        # Encode the body unless it's a string, then assume it is pre-formatted JSON
if not isinstance(body, string_types):
body = json.dumps(body)
if 'content-type' not in [header.lower() for header in dict_headers]:
dict_headers['Content-Type'] = 'application/json'
elif body_format == 'form-urlencoded':
if not isinstance(body, string_types):
try:
body = form_urlencoded(body)
except ValueError as e:
module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
if 'content-type' not in [header.lower() for header in dict_headers]:
dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'
elif body_format == 'form-multipart':
try:
content_type, body = prepare_multipart(body)
except (TypeError, ValueError) as e:
module.fail_json(msg='failed to parse body as form-multipart: %s' % to_native(e))
dict_headers['Content-Type'] = content_type
if creates is not None:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of uri executions.
if os.path.exists(creates):
module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)
if removes is not None:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of uri executions.
if not os.path.exists(removes):
module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)
# Make the request
start = utcnow()
r, info = uri(module, url, dest, body, body_format, method,
dict_headers, socket_timeout, ca_path, unredirected_headers,
decompress, ciphers, use_netrc)
elapsed = (utcnow() - start).seconds
if r and dest is not None and os.path.isdir(dest):
filename = get_response_filename(r) or 'index.html'
dest = os.path.join(dest, filename)
if r and r.fp is not None:
# r may be None for some errors
# r.fp may be None depending on the error, which means there are no headers either
content_type, main_type, sub_type, content_encoding = parse_content_type(r)
else:
content_type = 'application/octet-stream'
main_type = 'application'
sub_type = 'octet-stream'
content_encoding = 'utf-8'
if sub_type and '+' in sub_type:
# https://www.rfc-editor.org/rfc/rfc6839#section-3.1
sub_type_suffix = sub_type.partition('+')[2]
maybe_json = content_type and sub_type_suffix.lower() in JSON_CANDIDATES
elif sub_type:
maybe_json = content_type and sub_type.lower() in JSON_CANDIDATES
else:
maybe_json = False
maybe_output = maybe_json or return_content or info['status'] not in status_code
if maybe_output:
try:
if r.fp is None or r.closed:
raise TypeError
content = r.read()
except (AttributeError, TypeError):
            # there was no content to read; the body of an error response
            # may have been stored by fetch_url in info['body']
content = info.pop('body', b'')
elif r:
content = r
else:
content = None
resp = {}
resp['redirected'] = info['url'] != url
resp.update(info)
resp['elapsed'] = elapsed
resp['status'] = int(resp['status'])
resp['changed'] = False
# Write the file out if requested
if r and dest is not None:
if resp['status'] in status_code and resp['status'] != 304:
write_file(module, dest, content, resp)
# allow file attribute changes
resp['changed'] = True
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params, path=dest)
resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
resp['path'] = dest
# Transmogrify the headers, replacing '-' with '_', since variables don't
# work with dashes.
# In python3, the headers are title cased. Lowercase them to be
# compatible with the python2 behaviour.
uresp = {}
for key, value in iteritems(resp):
ukey = key.replace("-", "_").lower()
uresp[ukey] = value
if 'location' in uresp:
uresp['location'] = absolute_location(url, uresp['location'])
# Default content_encoding to try
if isinstance(content, binary_type):
u_content = to_text(content, encoding=content_encoding)
if maybe_json:
try:
js = json.loads(u_content)
uresp['json'] = js
except Exception:
...
else:
u_content = None
if module.no_log_values:
uresp = sanitize_keys(uresp, module.no_log_values, NO_MODIFY_KEYS)
if resp['status'] not in status_code:
uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
if return_content:
module.fail_json(content=u_content, **uresp)
else:
module.fail_json(**uresp)
elif return_content:
module.exit_json(content=u_content, **uresp)
else:
module.exit_json(**uresp)
if __name__ == '__main__':
main()
| 28,390
|
Python
|
.py
| 719
| 33.194715
| 158
| 0.666485
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,173
|
import_tasks.py
|
ansible_ansible/lib/ansible/modules/import_tasks.py
|
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
author: Ansible Core Team (@ansible)
module: import_tasks
short_description: Import a task list
description:
- Imports a list of tasks to be added to the current playbook for subsequent execution.
version_added: "2.4"
options:
free-form:
description:
- |
Specifies the name of the imported file directly without any other option C(- import_tasks: file.yml).
- Most keywords, including loops and conditionals, only apply to the imported tasks, not to this statement itself.
- If you need any of those to apply, use M(ansible.builtin.include_tasks) instead.
file:
description:
- Specifies the name of the file that lists tasks to add to the current playbook.
type: str
version_added: '2.7'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
- action_core.import
attributes:
check_mode:
support: none
diff_mode:
support: none
notes:
  - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
seealso:
- module: ansible.builtin.import_playbook
- module: ansible.builtin.import_role
- module: ansible.builtin.include_role
- module: ansible.builtin.include_tasks
- ref: playbooks_reuse
description: More information related to including and importing playbooks, roles and tasks.
"""
EXAMPLES = r"""
- hosts: all
tasks:
- ansible.builtin.debug:
msg: task1
- name: Include task list in play
ansible.builtin.import_tasks:
file: stuff.yaml
- ansible.builtin.debug:
msg: task10
- hosts: all
tasks:
- ansible.builtin.debug:
msg: task1
- name: Apply conditional to all imported tasks
ansible.builtin.import_tasks: stuff.yaml
when: hostvar is defined
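# A hedged sketch (file and variable names assumed): variables can be passed
# along with the import and are visible to every imported task.
- name: Import a task list with variables
  ansible.builtin.import_tasks:
    file: stuff.yaml
  vars:
    service: httpd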
"""
RETURN = r"""
# This module does not return anything except tasks to execute.
"""
| 2,137
|
Python
|
.py
| 66
| 28.621212
| 120
| 0.72392
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,174
|
debug.py
|
ansible_ansible/lib/ansible/modules/debug.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: debug
short_description: Print statements during execution
description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook.
- Useful for debugging together with the C(when:) directive.
- This module is also supported for Windows targets.
version_added: '0.8'
options:
msg:
description:
- The customized message that is printed. If omitted, prints a generic message.
type: str
default: 'Hello world!'
var:
description:
- A variable name to debug.
- Mutually exclusive with the O(msg) option.
- Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping,
so you should not be using Jinja2 delimiters unless you are looking for double interpolation.
type: str
verbosity:
description:
      - A number that controls when the debug is run; if you set it to 3, it will only run when -vvv or above is used.
type: int
default: 0
version_added: '2.1'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
bypass_host_loop:
support: none
become:
support: none
check_mode:
support: full
diff_mode:
support: none
connection:
support: none
delegation:
details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
support: partial
platform:
support: full
platforms: all
seealso:
- module: ansible.builtin.assert
- module: ansible.builtin.fail
author:
- Dag Wieers (@dagwieers)
- Michael DeHaan
"""
EXAMPLES = r"""
- name: Print the gateway for each host when defined
ansible.builtin.debug:
msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}
when: ansible_default_ipv4.gateway is defined
- name: Get uptime information
ansible.builtin.shell: /usr/bin/uptime
register: result
- name: Print return information from the previous task
ansible.builtin.debug:
var: result
verbosity: 2
- name: Display all variables/facts known for a host
ansible.builtin.debug:
var: hostvars[inventory_hostname]
verbosity: 4
- name: Prints two lines of messages, but only if there is an environment value set
ansible.builtin.debug:
msg:
- "Provisioning based on YOUR_KEY which is: {{ lookup('ansible.builtin.env', 'YOUR_KEY') }}"
- "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
"""
| 2,908
|
Python
|
.py
| 88
| 29.181818
| 111
| 0.722776
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,175
|
service_facts.py
|
ansible_ansible/lib/ansible/modules/service_facts.py
|
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# originally copied from AWX's scan_services module to bring this functionality
# into Core
from __future__ import annotations
DOCUMENTATION = r"""
---
module: service_facts
short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities.
version_added: "2.5"
requirements: ["Any of the following supported init systems: systemd, sysv, upstart, openrc, AIX SRC"]
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
attributes:
check_mode:
support: full
diff_mode:
support: none
facts:
support: full
platform:
platforms: posix
notes:
  - When accessing the RV(ansible_facts.services) facts collected by this module,
    it is recommended to not use "dot notation" because services can have a C(-)
    character in their name which would result in invalid "dot notation", such as
    C(ansible_facts.services.zuul-gateway). It is instead recommended to
    use the string value of the service name as the key in order to obtain
    the fact data value, like C(ansible_facts.services['zuul-gateway']).
- AIX SRC was added in version 2.11.
author:
- Adam Miller (@maxamillion)
"""
EXAMPLES = r"""
- name: Populate service facts
ansible.builtin.service_facts:
- name: Print service facts
ansible.builtin.debug:
var: ansible_facts.services
- name: Show names of existing systemd services; systemd sometimes knows about services that were never installed
debug: msg={{ existing_systemd_services | map(attribute='name') }}
vars:
known_systemd_services: "{{ ansible_facts['services'].values() | selectattr('source', 'equalto', 'systemd') }}"
existing_systemd_services: "{{ known_systemd_services | rejectattr('status', 'equalto', 'not-found') }}"
- name: restart systemd service if it exists
service:
state: restarted
name: ntpd.service
when: ansible_facts['services']['ntpd.service']['status'] | default('not-found') != 'not-found'
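# A hedged sketch (service name assumed): bracket notation avoids problems
# with dashes in service names.
- name: Assert that sshd is running
  ansible.builtin.assert:
    that: ansible_facts.services['sshd.service'].state == 'running'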
"""
RETURN = r"""
ansible_facts:
description: Facts to add to ansible_facts about the services on the system
returned: always
type: complex
contains:
services:
description: States of the services with service name as key.
returned: always
type: list
elements: dict
contains:
source:
description:
- Init system of the service.
- One of V(rcctl), V(systemd), V(sysv), V(upstart), V(src).
returned: always
type: str
sample: sysv
state:
description:
- State of the service.
- 'This commonly includes (but is not limited to) the following: V(failed), V(running), V(stopped) or V(unknown).'
- Depending on the used init system additional states might be returned.
returned: always
type: str
sample: running
status:
description:
- State of the service.
- Either V(enabled), V(disabled), V(static), V(indirect) or V(unknown).
returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart or OpenBSD
type: str
sample: enabled
name:
description: Name of the service.
returned: always
type: str
sample: arp-ethers.service
"""
import os
import platform
import re
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.service import is_systemd_managed
class BaseService(object):
def __init__(self, module):
self.module = module
class ServiceScanService(BaseService):
def _list_sysvinit(self, services):
rc, stdout, stderr = self.module.run_command("%s --status-all" % self.service_path)
if rc == 4 and not os.path.exists('/etc/init.d'):
# This function is not intended to run on Red Hat but it could happen
# if `chkconfig` is not installed. `service` on RHEL9 returns rc 4
# when /etc/init.d is missing, add the extra guard of checking /etc/init.d
# instead of solely relying on rc == 4
return
if rc != 0:
self.module.warn("Unable to query 'service' tool (%s): %s" % (rc, stderr))
p = re.compile(r'^\s*\[ (?P<state>\+|\-) \]\s+(?P<name>.+)$', flags=re.M)
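        # Illustrative '--status-all' line this matches: ' [ + ]  ssh'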
for match in p.finditer(stdout):
service_name = match.group('name')
if match.group('state') == "+":
service_state = "running"
else:
service_state = "stopped"
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
def _list_upstart(self, services):
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % self.initctl_path)
if rc != 0:
self.module.warn('Unable to query upstart for service data: %s' % stderr)
else:
real_stdout = stdout.replace("\r", "")
for line in real_stdout.split("\n"):
m = p.match(line)
if not m:
continue
service_name = m.group('name')
service_goal = m.group('goal')
service_state = m.group('state')
if m.group('pid'):
pid = m.group('pid')
else:
pid = None # NOQA
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
services[service_name] = payload
def _list_rh(self, services):
p = re.compile(
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
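        # An illustrative chkconfig line the pattern above matches (values hypothetical):
        #   crond   0:off  1:off  2:on  3:on  4:on  5:on  6:off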
rc, stdout, stderr = self.module.run_command('%s' % self.chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
for line in stdout.split('\n'):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):
match_any = True
if match_any:
# Try extra flags " -l --allservices" needed for SLES11
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % self.chkconfig_path, use_unsafe_shell=True)
elif '--list' in stderr:
# Extra flag needed for RHEL5
rc, stdout, stderr = self.module.run_command('%s --list' % self.chkconfig_path, use_unsafe_shell=True)
for line in stdout.split('\n'):
m = p.match(line)
if m:
service_name = m.group('service')
service_state = 'stopped'
service_status = "disabled"
if m.group('rl3') == 'on':
service_status = "enabled"
rc, stdout, stderr = self.module.run_command('%s %s status' % (self.service_path, service_name), use_unsafe_shell=True)
service_state = rc
if rc in (0,):
service_state = 'running'
# elif rc in (1,3):
else:
output = stderr.lower()
for x in ('root', 'permission', 'not in sudoers'):
if x in output:
self.module.warn('Insufficient permissions to query sysV service "%s" and their states' % service_name)
break
else:
service_state = 'stopped'
service_data = {"name": service_name, "state": service_state, "status": service_status, "source": "sysv"}
services[service_name] = service_data
def _list_openrc(self, services):
all_services_runlevels = {}
rc, stdout, stderr = self.module.run_command("%s -a -s -m 2>&1 | grep '^ ' | tr -d '[]'" % self.rc_status_path, use_unsafe_shell=True)
rc_u, stdout_u, stderr_u = self.module.run_command("%s show -v 2>&1 | grep '|'" % self.rc_update_path, use_unsafe_shell=True)
for line in stdout_u.split('\n'):
line_data = line.split('|')
if len(line_data) < 2:
continue
service_name = line_data[0].strip()
runlevels = line_data[1].strip()
if not runlevels:
all_services_runlevels[service_name] = None
else:
all_services_runlevels[service_name] = runlevels.split()
for line in stdout.split('\n'):
line_data = line.split()
if len(line_data) < 2:
continue
service_name = line_data[0]
service_state = line_data[1]
service_runlevels = all_services_runlevels[service_name]
service_data = {"name": service_name, "runlevels": service_runlevels, "state": service_state, "source": "openrc"}
services[service_name] = service_data
def gather_services(self):
services = {}
# find cli tools if available
self.service_path = self.module.get_bin_path("service")
self.chkconfig_path = self.module.get_bin_path("chkconfig")
self.initctl_path = self.module.get_bin_path("initctl")
self.rc_status_path = self.module.get_bin_path("rc-status")
self.rc_update_path = self.module.get_bin_path("rc-update")
if self.service_path and self.chkconfig_path is None and self.rc_status_path is None:
self._list_sysvinit(services)
# TODO: review conditionals ... they should not be this 'exclusive'
if self.initctl_path and self.chkconfig_path is None:
self._list_upstart(services)
elif self.chkconfig_path:
self._list_rh(services)
elif self.rc_status_path is not None and self.rc_update_path is not None:
self._list_openrc(services)
return services
class SystemctlScanService(BaseService):
BAD_STATES = frozenset(['not-found', 'masked', 'failed'])
def systemd_enabled(self):
return is_systemd_managed(self.module)
def _list_from_units(self, systemctl_path, services):
# list units as systemd sees them
rc, stdout, stderr = self.module.run_command("%s list-units --no-pager --type service --all --plain" % systemctl_path, use_unsafe_shell=True)
if rc != 0:
self.module.warn("Could not list units from systemd: %s" % stderr)
else:
for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line]:
state_val = "stopped"
status_val = "unknown"
fields = line.split()
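                # Illustrative 'list-units --plain' columns: UNIT LOAD ACTIVE SUB DESCRIPTION,
                # e.g. 'sshd.service loaded active running OpenSSH server daemon',
                # so fields[2] holds ACTIVE and fields[3] holds SUB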
# systemd sometimes gives misleading status
# check all fields for bad states
for bad in self.BAD_STATES:
# except description
if bad in fields[:-1]:
status_val = bad
break
else:
# active/inactive
status_val = fields[2]
service_name = fields[0]
if fields[3] == "running":
state_val = "running"
services[service_name] = {"name": service_name, "state": state_val, "status": status_val, "source": "systemd"}
def _list_from_unit_files(self, systemctl_path, services):
# now try unit files for complete picture and final 'status'
rc, stdout, stderr = self.module.run_command("%s list-unit-files --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
if rc != 0:
self.module.warn("Could not get unit files data from systemd: %s" % stderr)
else:
for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line]:
# there is one more column (VENDOR PRESET) from `systemctl list-unit-files` for systemd >= 245
try:
service_name, status_val = line.split()[:2]
except IndexError:
self.module.fail_json(msg="Malformed output discovered from systemd list-unit-files: {0}".format(line))
if service_name not in services:
rc, stdout, stderr = self.module.run_command("%s show %s --property=ActiveState" % (systemctl_path, service_name), use_unsafe_shell=True)
state = 'unknown'
if not rc and stdout != '':
state = stdout.replace('ActiveState=', '').rstrip()
services[service_name] = {"name": service_name, "state": state, "status": status_val, "source": "systemd"}
elif services[service_name]["status"] not in self.BAD_STATES:
services[service_name]["status"] = status_val
def gather_services(self):
services = {}
if self.systemd_enabled():
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
if systemctl_path:
self._list_from_units(systemctl_path, services)
self._list_from_unit_files(systemctl_path, services)
return services
class AIXScanService(BaseService):
def gather_services(self):
services = {}
if platform.system() == 'AIX':
lssrc_path = self.module.get_bin_path("lssrc")
if lssrc_path:
rc, stdout, stderr = self.module.run_command("%s -a" % lssrc_path)
if rc != 0:
self.module.warn("lssrc could not retrieve service data (%s): %s" % (rc, stderr))
else:
for line in stdout.split('\n'):
line_data = line.split()
if len(line_data) < 2:
continue # Skipping because we expected more data
if line_data[0] == "Subsystem":
continue # Skip header
service_name = line_data[0]
if line_data[-1] == "active":
service_state = "running"
elif line_data[-1] == "inoperative":
service_state = "stopped"
else:
service_state = "unknown"
services[service_name] = {"name": service_name, "state": service_state, "source": "src"}
return services
class OpenBSDScanService(BaseService):
def query_rcctl(self, cmd):
svcs = []
rc, stdout, stderr = self.module.run_command("%s ls %s" % (self.rcctl_path, cmd))
if 'needs root privileges' in stderr.lower():
self.module.warn('rcctl requires root privileges')
else:
for svc in stdout.split('\n'):
if svc == '':
continue
else:
svcs.append(svc)
return svcs
def get_info(self, name):
info = {}
rc, stdout, stderr = self.module.run_command("%s get %s" % (self.rcctl_path, name))
if 'needs root privileges' in stderr.lower():
self.module.warn('rcctl requires root privileges')
else:
undy = '%s_' % name
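            # rcctl 'get <name>' prints lines like '<name>_flags=...' (illustrative);
            # stripping the '<name>_' prefix leaves keys such as 'flags' and 'timeout'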
for variable in stdout.split('\n'):
if variable == '' or '=' not in variable:
continue
else:
k, v = variable.replace(undy, '', 1).split('=', 1)
info[k] = v
return info
def gather_services(self):
services = {}
self.rcctl_path = self.module.get_bin_path("rcctl")
if self.rcctl_path:
            # populate services with all possible services
for svc in self.query_rcctl('all'):
services[svc] = {'name': svc, 'source': 'rcctl', 'rogue': False}
services[svc].update(self.get_info(svc))
for svc in self.query_rcctl('on'):
services[svc].update({'status': 'enabled'})
for svc in self.query_rcctl('started'):
services[svc].update({'state': 'running'})
# Override the state for services which are marked as 'failed'
for svc in self.query_rcctl('failed'):
services[svc].update({'state': 'failed'})
for svc in services.keys():
# Based on the list of services that are enabled/failed, determine which are disabled
if services[svc].get('status') is None:
services[svc].update({'status': 'disabled'})
                # and do the same for those that aren't running
if services[svc].get('state') is None:
services[svc].update({'state': 'stopped'})
for svc in self.query_rcctl('rogue'):
services[svc]['rogue'] = True
return services
class FreeBSDScanService(BaseService):
_pid_regex = r'.+ is running as pid (\d+)\.'
def get_info(self, service):
service_info = {'status': 'unknown'}
rc, stdout, stderr = self.module.run_command("%s %s describe" % (self.service, service))
if rc == 0:
service_info['description'] = stdout
rc, stdout, stderr = self.module.run_command("%s %s status" % (self.service, service))
if rc == 0:
service_info['status'] = 'running'
            lines = stdout.splitlines()
            matches = re.match(self._pid_regex, lines[0]) if lines else None
            if matches:
                # does not always get pid output
                service_info['pid'] = matches[1]
else:
service_info['pid'] = 'N/A'
elif rc == 1:
if stdout and 'is not running' in stdout.splitlines()[0]:
service_info['status'] = 'stopped'
elif stderr and 'unknown directive' in stderr.splitlines()[0]:
service_info['status'] = 'unknown'
self.module.warn('Status query not supported for %s' % service)
else:
service_info['status'] = 'unknown'
out = stderr if stderr else stdout
self.module.warn('Could not retrieve status for %s: %s' % (service, out))
else:
out = stderr if stderr else stdout
self.module.warn("Failed to get info for %s, no system message (rc=%s): %s" % (service, rc, out))
return service_info
def get_enabled(self):
services = []
rc, stdout, stderr = self.module.run_command("%s -e" % (self.service))
if rc == 0:
for line in stdout.splitlines():
if line.startswith('/'):
services.append(os.path.basename(line))
elif stderr:
self.module.warn("Failed to get services: %s" % stderr)
elif stdout:
self.module.warn("Failed to get services: %s" % stdout)
else:
self.module.warn("Failed to get services, no system message: rc=%s" % rc)
return services
def gather_services(self):
services = {}
if sys.platform.startswith('freebsd'):
self.service = self.module.get_bin_path("service")
if self.service:
for svc in self.get_enabled():
services[svc] = self.get_info(svc)
return services
def main():
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
locale = get_best_parsable_locale(module)
module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale)
if sys.platform.startswith('freebsd'):
        # freebsd is not compatible but would match other classes
service_modules = (FreeBSDScanService,)
else:
service_modules = (ServiceScanService, SystemctlScanService, AIXScanService, OpenBSDScanService)
all_services = {}
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc:
all_services.update(svc)
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. This can be due to privileges or some other configuration issue.")
else:
results = dict(ansible_facts=dict(services=all_services))
module.exit_json(**results)
if __name__ == '__main__':
main()
# File: ansible_ansible/lib/ansible/modules/shell.py
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# There is no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".
from __future__ import annotations
DOCUMENTATION = r"""
---
module: shell
short_description: Execute shell commands on targets
description:
- The M(ansible.builtin.shell) module takes the command name followed by a list of space-delimited arguments.
- Either a free form command or O(cmd) parameter is required, see the examples.
- It is almost exactly like the M(ansible.builtin.command) module but runs
the command through a shell (C(/bin/sh)) on the remote node.
- For Windows targets, use the M(ansible.windows.win_shell) module instead.
version_added: "0.2"
options:
free_form:
description:
- The shell module takes a free form command to run, as a string.
- There is no actual parameter named 'free form'.
- See the examples on how to use this module.
type: str
cmd:
type: str
description:
- The command to run followed by optional arguments.
creates:
description:
      - A filename; when it already exists, this step will B(not) be run.
type: path
removes:
description:
      - A filename; when it does not exist, this step will B(not) be run.
type: path
version_added: "0.8"
chdir:
description:
- Change into this directory before running the command.
type: path
version_added: "0.6"
executable:
description:
- Change the shell used to execute the command.
- This expects an absolute path to the executable.
type: path
version_added: "0.9"
stdin:
description:
- Set the stdin of the command directly to the specified value.
type: str
version_added: "2.4"
stdin_add_newline:
description:
- Whether to append a newline to stdin data.
type: bool
default: yes
version_added: "2.8"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.raw
attributes:
check_mode:
        details: while the command itself is arbitrary and cannot be subject to the check mode semantics, it adds O(creates)/O(removes) options as a workaround
support: partial
diff_mode:
support: none
platform:
support: full
platforms: posix
raw:
support: full
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(ansible.builtin.command) module instead. Best practices
when writing playbooks will follow the trend of using M(ansible.builtin.command)
unless the M(ansible.builtin.shell) module is explicitly required. When running ad-hoc
commands, use your best judgement.
- To sanitize any variables passed to the shell module, you should use
C({{ var | quote }}) instead of just C({{ var }}) to make sure they
do not include evil things like semicolons.
- An alternative to using inline shell scripts with this module is to use
the M(ansible.builtin.script) module possibly together with the M(ansible.builtin.template) module.
- For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
- If the command returns non UTF-8 data, it must be encoded to avoid issues. One option is to pipe
the output through C(base64).
seealso:
- module: ansible.builtin.command
- module: ansible.builtin.raw
- module: ansible.builtin.script
- module: ansible.windows.win_shell
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
- name: Execute the command in remote shell; stdout goes to the specified file on the remote
ansible.builtin.shell: somescript.sh >> somelog.txt
- name: Change the working directory to somedir/ before executing the command
ansible.builtin.shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
# You can also use the 'args' form to provide the options.
- name: This command will change the working directory to somedir/ and will only run when somedir/somelog.txt doesn't exist
ansible.builtin.shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
creates: somelog.txt
# You can also use the 'cmd' parameter instead of free form format.
- name: This command will change the working directory to somedir/
ansible.builtin.shell:
cmd: ls -l | grep log
chdir: somedir/
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
ansible.builtin.shell: cat < /tmp/*txt
args:
executable: /bin/bash
- name: Run a command using a templated variable (always use quote filter to avoid injection)
ansible.builtin.shell: cat {{ myfile|quote }}
# You can use shell to run other executables to perform actions inline
- name: Run expect to wait for a successful PXE boot via out-of-band CIMC
ansible.builtin.shell: |
set timeout 300
spawn ssh admin@{{ cimc_host }}
expect "password:"
send "{{ cimc_password }}\n"
expect "\n{{ cimc_name }}"
send "connect host\n"
expect "pxeboot.n12"
send "\n"
exit 0
args:
executable: /usr/bin/expect
delegate_to: localhost
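# Hypothetical example (not from the upstream docs): the stdin option can feed
# data to a command directly; stdin_add_newline (default yes) controls the
# trailing newline.
- name: Provide input to a command via stdin instead of a shell pipe
  ansible.builtin.shell: cat
  args:
    stdin: hello from ansible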
"""
RETURN = r"""
msg:
  description: Whether the command changed the target system.
returned: always
type: bool
sample: True
start:
description: The command execution start time.
returned: always
type: str
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time.
returned: always
type: str
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time.
returned: always
type: str
sample: '0:00:00.325771'
stdout:
description: The command standard output.
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master …'
stderr:
description: The command standard error.
returned: always
type: str
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task.
returned: always
type: str
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success).
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines.
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
stderr_lines:
description: The command standard error split in lines.
returned: always
type: list
  sample: [u'ls: cannot access foo: No such file or directory', u'ls …']
"""
# File: ansible_ansible/lib/ansible/modules/iptables.py
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: iptables
short_description: Modify iptables rules
version_added: "2.0"
author:
- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
- Sébastien DA ROCHA (@sebastiendarocha)
description:
- M(ansible.builtin.iptables) is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel.
- This module does not handle the saving and/or loading of rules, but rather
only manipulates the current rules that are present in memory. This is the
same as the behaviour of the C(iptables) and C(ip6tables) command which
this module uses internally.
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: linux
notes:
- This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table on which the command should operate.
- If the kernel is configured with automatic module loading, an attempt will be made
to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
state:
description:
- Whether the rule should be absent or present.
type: str
choices: [ absent, present ]
default: present
action:
description:
- Whether the rule should be appended at the bottom or inserted at the top.
- If the rule already exists the chain will not be modified.
type: str
choices: [ append, insert ]
default: append
version_added: "2.2"
rule_num:
description:
- Insert the rule as the given rule number.
- This works only with O(action=insert).
type: str
version_added: "2.5"
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
type: str
choices: [ ipv4, ipv6 ]
default: ipv4
chain:
description:
- Specify the iptables chain to modify.
- This could be a user-defined chain or one of the standard iptables chains, like
V(INPUT), V(FORWARD), V(OUTPUT), V(PREROUTING), V(POSTROUTING), V(SECMARK) or V(CONNSECMARK).
type: str
protocol:
description:
- The protocol of the rule or of the packet to check.
      - The specified protocol can be one of V(tcp), V(udp), V(udplite), V(icmp), V(ipv6-icmp) or V(icmpv6),
        V(esp), V(ah), V(sctp), the special keyword V(all), or a numeric value
        representing one of these protocols or a different one.
- A protocol name from C(/etc/protocols) is also allowed.
- A V(!) argument before the protocol inverts the test.
- The number zero is equivalent to all.
- V(all) will match with all protocols and is taken as default when this option is omitted.
type: str
source:
description:
- Source specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A V(!) argument before the
address specification inverts the sense of the address.
type: str
destination:
description:
- Destination specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A V(!) argument before the
address specification inverts the sense of the address.
type: str
tcp_flags:
description:
- TCP flags specification.
- O(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
type: dict
version_added: "2.4"
suboptions:
flags:
description:
- List of flags you want to examine.
type: list
elements: str
flags_set:
description:
- Flags to be set.
type: list
elements: str
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property.
- The set of matches makes up the condition under which a target is invoked.
- Matches are evaluated first to last if specified as an array and work in short-circuit
fashion, in other words if one extension yields false, the evaluation will stop.
type: list
elements: str
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet matches it.
- The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets that decide the
        fate of the packet immediately, or an extension (see the EXTENSIONS
        section of iptables(8)).
- If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
type: str
gateway:
description:
- This specifies the IP address of the host to send the cloned packets.
- This option is only valid when O(jump=TEE).
type: str
version_added: "2.8"
log_prefix:
description:
- Specifies a log text for the rule. Only makes sense with a LOG jump.
type: str
version_added: "2.5"
log_level:
description:
- Logging level according to the syslogd-defined priorities.
- The value can be strings or numbers from 1-8.
- This parameter is only applicable if O(jump=LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
goto:
description:
- This specifies that the processing should continue in a user-specified chain.
- Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
type: str
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the V(INPUT), V(FORWARD) and V(PREROUTING) chains).
- When the V(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a V(+), then any interface which begins with
this name will match.
- If this option is omitted, any interface name will match.
type: str
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the V(FORWARD), V(OUTPUT) and V(POSTROUTING) chains).
- When the V(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a V(+), then any interface which begins
with this name will match.
- If this option is omitted, any interface name will match.
type: str
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets.
- Since there is no way to tell the source or destination ports of such
a packet (or ICMP type), such a packet will not match any rules which specify them.
- When the "!" argument precedes the fragment argument, the rule will only match head fragments,
or unfragmented packets.
type: str
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during V(INSERT), V(APPEND), V(REPLACE) operations).
type: str
source_port:
description:
- Source port or port range specification.
- This can either be a service name or a port number.
- An inclusive range can also be specified, using the format C(first:last).
- If the first port is omitted, V(0) is assumed; if the last is omitted, V(65535) is assumed.
- If the first port is greater than the second one they will be swapped.
type: str
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped.
This is only valid if the rule also specifies one of the following
protocols: tcp, udp, dccp or sctp."
type: str
destination_ports:
description:
- This specifies multiple destination port numbers or port ranges to match in the multiport module.
- It can only be used in conjunction with the protocols tcp, udp, udplite, dccp and sctp.
type: list
elements: str
default: []
version_added: "2.11"
to_ports:
description:
- This specifies a destination port or range of ports to use, without
this, the destination port is never altered.
- This is only valid if the rule also specifies one of the protocol
V(tcp), V(udp), V(dccp) or V(sctp).
type: str
to_destination:
description:
- This specifies a destination address to use with O(ctstate=DNAT).
- Without this, the destination address is never altered.
type: str
version_added: "2.1"
to_source:
description:
- This specifies a source address to use with O(ctstate=SNAT).
- Without this, the source address is never altered.
type: str
version_added: "2.2"
syn:
description:
- This allows matching packets that have the SYN bit set and the ACK
and RST bits unset.
- When negated, this matches all packets with the RST or the ACK bits set.
type: str
choices: [ ignore, match, negate ]
default: ignore
version_added: "2.5"
set_dscp_mark:
description:
- This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value.
- If the parameter is set, O(jump) is set to V(DSCP).
- Mutually exclusive with O(set_dscp_mark_class).
type: str
version_added: "2.1"
set_dscp_mark_class:
description:
- This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark.
- If the parameter is set, O(jump) is set to V(DSCP).
- Mutually exclusive with O(set_dscp_mark).
type: str
version_added: "2.1"
comment:
description:
- This specifies a comment that will be added to the rule.
type: str
ctstate:
description:
- A list of the connection states to match in the conntrack module.
- Possible values are V(INVALID), V(NEW), V(ESTABLISHED), V(RELATED), V(UNTRACKED), V(SNAT), V(DNAT).
type: list
elements: str
default: []
src_range:
description:
- Specifies the source IP range to match the iprange module.
type: str
version_added: "2.8"
dst_range:
description:
- Specifies the destination IP range to match in the iprange module.
type: str
version_added: "2.8"
match_set:
description:
- Specifies a set name that can be defined by ipset.
- Must be used together with the O(match_set_flags) parameter.
- When the V(!) argument is prepended then it inverts the rule.
- Uses the iptables set extension.
type: str
version_added: "2.11"
match_set_flags:
description:
- Specifies the necessary flags for the match_set parameter.
- Must be used together with the O(match_set) parameter.
- Uses the iptables set extension.
- Choices V(dst,dst) and V(src,src) added in version 2.17.
type: str
choices: [ "src", "dst", "src,dst", "dst,src", "dst,dst", "src,src" ]
version_added: "2.11"
limit:
description:
- Specifies the maximum average number of matches to allow per second.
- The number can specify units explicitly, using C(/second), C(/minute),
C(/hour) or C(/day), or parts of them (so V(5/second) is the same as
V(5/s)).
type: str
limit_burst:
description:
- Specifies the maximum burst before the above limit kicks in.
type: str
version_added: "2.1"
uid_owner:
description:
- Specifies the UID or username to use in the match by owner rule.
      - Since Ansible 2.6, prepending the C(!) argument inverts the rule to apply
        to all users except the one specified.
type: str
version_added: "2.1"
gid_owner:
description:
- Specifies the GID or group to use in the match by owner rule.
type: str
version_added: "2.9"
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
C(jump=REJECT).'
type: str
version_added: "2.1"
icmp_type:
description:
- This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
command C(iptables -p icmp -h).
type: str
version_added: "2.2"
flush:
description:
- Flushes the specified table and chain of all rules.
- If no chain is specified then the entire table is purged.
- Ignores all other parameters.
type: bool
default: false
version_added: "2.2"
policy:
description:
- Set the policy for the chain to the given target.
- Only built-in chains can have policies.
- This parameter requires the O(chain) parameter.
- If you specify this parameter, all other parameters will be ignored.
- This parameter is used to set the default policy for the given O(chain).
Do not confuse this with O(jump) parameter.
type: str
choices: [ ACCEPT, DROP, QUEUE, RETURN ]
version_added: "2.2"
wait:
description:
- Wait N seconds for the xtables lock to prevent multiple instances of
the program from running concurrently.
type: str
version_added: "2.10"
chain_management:
description:
- If V(true) and O(state) is V(present), the chain will be created if needed.
      - If V(true) and O(state) is V(absent), the chain will be deleted if the only
        other parameters passed are O(chain) and optionally O(table).
type: bool
default: false
version_added: "2.13"
numeric:
description:
      - This parameter controls the list action (C(-L)) of iptables, which is used internally by the module.
      - Does not affect the actual functionality. Use this if iptables hangs when creating a chain or altering policy.
      - If V(true), then iptables skips the DNS lookup of the IP addresses in a chain when it uses the list action.
      - Listing is used internally, for example, when setting a policy or creating a chain.
type: bool
default: false
version_added: "2.15"
"""
EXAMPLES = r"""
- name: Block specific IP
ansible.builtin.iptables:
chain: INPUT
source: 8.8.8.8
jump: DROP
become: yes
- name: Forward port 80 to 8600
ansible.builtin.iptables:
table: nat
chain: PREROUTING
in_interface: eth0
protocol: tcp
match: tcp
destination_port: 80
jump: REDIRECT
to_ports: 8600
comment: Redirect web traffic to port 8600
become: yes
- name: Allow related and established connections
ansible.builtin.iptables:
chain: INPUT
ctstate: ESTABLISHED,RELATED
jump: ACCEPT
become: yes
- name: Allow new incoming SYN packets on TCP port 22 (SSH)
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_port: 22
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new SSH connections.
- name: Match on IP ranges
ansible.builtin.iptables:
chain: FORWARD
src_range: 192.168.1.100-192.168.1.199
dst_range: 10.0.0.1-10.0.0.50
jump: ACCEPT
- name: Allow source IPs defined in ipset "admin_hosts" on port 22
  ansible.builtin.iptables:
    chain: INPUT
    protocol: tcp
    match_set: admin_hosts
    match_set_flags: src
    destination_port: 22
    jump: ACCEPT
- name: Tag all outbound tcp packets with DSCP mark 8
ansible.builtin.iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark: 8
protocol: tcp
- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
ansible.builtin.iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark_class: CS1
protocol: tcp
# Create the user-defined chain ALLOWLIST
- iptables:
chain: ALLOWLIST
chain_management: true
# Delete the user-defined chain ALLOWLIST
- iptables:
chain: ALLOWLIST
chain_management: true
state: absent
- name: Insert a rule on line 5
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_port: 8080
jump: ACCEPT
action: insert
rule_num: 5
# Think twice before running following task as this may lock target system
- name: Set the policy for the INPUT chain to DROP
ansible.builtin.iptables:
chain: INPUT
policy: DROP
- name: Reject tcp with tcp-reset
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
reject_with: tcp-reset
ip_version: ipv4
- name: Set tcp flags
ansible.builtin.iptables:
chain: OUTPUT
jump: DROP
protocol: tcp
tcp_flags:
flags: ALL
flags_set:
- ACK
- RST
- SYN
- FIN
- name: Iptables flush filter
ansible.builtin.iptables:
chain: "{{ item }}"
flush: yes
with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
- name: Iptables flush nat
ansible.builtin.iptables:
table: nat
chain: '{{ item }}'
flush: yes
with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
- name: Log packets arriving into a user-defined chain
ansible.builtin.iptables:
chain: LOGGING
action: append
state: present
limit: 2/second
limit_burst: 20
log_prefix: "IPTABLES:INFO: "
log_level: info
- name: Allow connections on multiple ports
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_ports:
- "80"
- "443"
- "8081:8083"
jump: ACCEPT
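# Hypothetical example (not from the upstream docs): uid_owner adds
# '-m owner --uid-owner' to the rule, matching locally generated packets
# by their owning UID.
- name: Drop outbound packets created by UID 1000
  ansible.builtin.iptables:
    chain: OUTPUT
    uid_owner: "1000"
    jump: DROP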
"""
import re
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
BINS = dict(
ipv4='iptables',
ipv6='ip6tables',
)
ICMP_TYPE_OPTIONS = dict(
ipv4='--icmp-type',
ipv6='--icmpv6-type',
)
def append_param(rule, param, flag, is_list):
if is_list:
for item in param:
append_param(rule, item, flag, False)
else:
if param is not None:
if param[0] == '!':
rule.extend(['!', flag, param[1:]])
else:
rule.extend([flag, param])
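# Illustrative sketch (not part of the original module): append_param() in
# isolation. A leading '!' is lifted out so iptables receives the negation as
# its own token, and list parameters expand to one flag per item. Runnable on
# its own; nothing calls it.
def _append_param_demo():
    rule = []
    append_param(rule, '!192.0.2.1', '-s', False)
    append_param(rule, ['tcp', 'limit'], '-m', True)
    return rule  # ['!', '-s', '192.0.2.1', '-m', 'tcp', '-m', 'limit']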
def append_tcp_flags(rule, param, flag):
if param:
if 'flags' in param and 'flags_set' in param:
rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])])
def append_match_flag(rule, param, flag, negatable):
if param == 'match':
rule.extend([flag])
elif negatable and param == 'negate':
rule.extend(['!', flag])
def append_csv(rule, param, flag):
if param:
rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
if param:
rule.extend(['-m', match])
def append_jump(rule, param, jump):
if param:
rule.extend(['-j', jump])
def append_wait(rule, param, flag):
if param:
rule.extend([flag, param])
def construct_rule(params):
rule = []
append_wait(rule, params['wait'], '-w')
append_param(rule, params['protocol'], '-p', False)
append_param(rule, params['source'], '-s', False)
append_param(rule, params['destination'], '-d', False)
append_param(rule, params['match'], '-m', True)
append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
append_param(rule, params['jump'], '-j', False)
if params.get('jump') and params['jump'].lower() == 'tee':
append_param(rule, params['gateway'], '--gateway', False)
append_param(rule, params['log_prefix'], '--log-prefix', False)
append_param(rule, params['log_level'], '--log-level', False)
append_param(rule, params['to_destination'], '--to-destination', False)
append_match(rule, params['destination_ports'], 'multiport')
append_csv(rule, params['destination_ports'], '--dports')
append_param(rule, params['to_source'], '--to-source', False)
append_param(rule, params['goto'], '-g', False)
append_param(rule, params['in_interface'], '-i', False)
append_param(rule, params['out_interface'], '-o', False)
append_param(rule, params['fragment'], '-f', False)
append_param(rule, params['set_counters'], '-c', False)
append_param(rule, params['source_port'], '--source-port', False)
append_param(rule, params['destination_port'], '--destination-port', False)
append_param(rule, params['to_ports'], '--to-ports', False)
append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
if params.get('set_dscp_mark') and params.get('jump').lower() != 'dscp':
append_jump(rule, params['set_dscp_mark'], 'DSCP')
append_param(
rule,
params['set_dscp_mark_class'],
'--set-dscp-class',
False)
if params.get('set_dscp_mark_class') and params.get('jump').lower() != 'dscp':
append_jump(rule, params['set_dscp_mark_class'], 'DSCP')
append_match_flag(rule, params['syn'], '--syn', True)
if 'conntrack' in params['match']:
append_csv(rule, params['ctstate'], '--ctstate')
elif 'state' in params['match']:
append_csv(rule, params['ctstate'], '--state')
elif params['ctstate']:
append_match(rule, params['ctstate'], 'conntrack')
append_csv(rule, params['ctstate'], '--ctstate')
if 'iprange' in params['match']:
append_param(rule, params['src_range'], '--src-range', False)
append_param(rule, params['dst_range'], '--dst-range', False)
elif params['src_range'] or params['dst_range']:
append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
append_param(rule, params['src_range'], '--src-range', False)
append_param(rule, params['dst_range'], '--dst-range', False)
if 'set' in params['match']:
append_param(rule, params['match_set'], '--match-set', False)
append_match_flag(rule, 'match', params['match_set_flags'], False)
elif params['match_set']:
append_match(rule, params['match_set'], 'set')
append_param(rule, params['match_set'], '--match-set', False)
append_match_flag(rule, 'match', params['match_set_flags'], False)
append_match(rule, params['limit'] or params['limit_burst'], 'limit')
append_param(rule, params['limit'], '--limit', False)
append_param(rule, params['limit_burst'], '--limit-burst', False)
append_match(rule, params['uid_owner'], 'owner')
append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
append_param(rule, params['uid_owner'], '--uid-owner', False)
append_match(rule, params['gid_owner'], 'owner')
append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
append_param(rule, params['gid_owner'], '--gid-owner', False)
if params['jump'] is None:
append_jump(rule, params['reject_with'], 'REJECT')
append_jump(rule, params['set_dscp_mark_class'], 'DSCP')
append_jump(rule, params['set_dscp_mark'], 'DSCP')
append_param(rule, params['reject_with'], '--reject-with', False)
append_param(
rule,
params['icmp_type'],
ICMP_TYPE_OPTIONS[params['ip_version']],
False)
append_match(rule, params['comment'], 'comment')
append_param(rule, params['comment'], '--comment', False)
return rule
def push_arguments(iptables_path, action, params, make_rule=True):
cmd = [iptables_path]
cmd.extend(['-t', params['table']])
cmd.extend([action, params['chain']])
if action == '-I' and params['rule_num']:
cmd.extend([params['rule_num']])
if make_rule:
cmd.extend(construct_rule(params))
return cmd
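# Illustrative sketch (not part of the original module): the argument list
# push_arguments() builds for a flush (-F) of the INPUT chain, where
# make_rule=False skips construct_rule() entirely. The binary path and the
# parameter values below are hypothetical sample values.
def _push_arguments_demo():
    params = {'table': 'filter', 'chain': 'INPUT', 'rule_num': None}
    return push_arguments('/sbin/iptables', '-F', params, make_rule=False)
    # -> ['/sbin/iptables', '-t', 'filter', '-F', 'INPUT']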
def check_rule_present(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-C', params)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
return (rc == 0)
def append_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-A', params)
module.run_command(cmd, check_rc=True)
def insert_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-I', params)
module.run_command(cmd, check_rc=True)
def remove_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-D', params)
module.run_command(cmd, check_rc=True)
def flush_table(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
module.run_command(cmd, check_rc=True)
def set_chain_policy(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
cmd.append(params['policy'])
module.run_command(cmd, check_rc=True)
def get_chain_policy(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
if module.params['numeric']:
cmd.append('--numeric')
rc, out, err = module.run_command(cmd, check_rc=True)
chain_header = out.split("\n")[0]
result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
if result:
return result.group(1)
return None
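# Illustrative sketch (not part of the original module): the header regex used
# by get_chain_policy() against a typical 'iptables -L' chain header.
def _chain_policy_demo():
    chain_header = 'Chain INPUT (policy ACCEPT)'
    result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
    return result.group(1) if result else None  # -> 'ACCEPT'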
def get_iptables_version(iptables_path, module):
cmd = [iptables_path, '--version']
rc, out, err = module.run_command(cmd, check_rc=True)
return out.split('v')[1].rstrip('\n')
def create_chain(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-N', params, make_rule=False)
module.run_command(cmd, check_rc=True)
def check_chain_present(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
if module.params['numeric']:
cmd.append('--numeric')
rc, out, err = module.run_command(cmd, check_rc=False)
return (rc == 0)
def delete_chain(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-X', params, make_rule=False)
module.run_command(cmd, check_rc=True)
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
state=dict(type='str', default='present', choices=['absent', 'present']),
action=dict(type='str', default='append', choices=['append', 'insert']),
ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
chain=dict(type='str'),
rule_num=dict(type='str'),
protocol=dict(type='str'),
wait=dict(type='str'),
source=dict(type='str'),
to_source=dict(type='str'),
destination=dict(type='str'),
to_destination=dict(type='str'),
match=dict(type='list', elements='str', default=[]),
tcp_flags=dict(type='dict',
options=dict(
flags=dict(type='list', elements='str'),
flags_set=dict(type='list', elements='str'))
),
jump=dict(type='str'),
gateway=dict(type='str'),
log_prefix=dict(type='str'),
log_level=dict(type='str',
choices=['0', '1', '2', '3', '4', '5', '6', '7',
'emerg', 'alert', 'crit', 'error',
'warning', 'notice', 'info', 'debug'],
default=None,
),
goto=dict(type='str'),
in_interface=dict(type='str'),
out_interface=dict(type='str'),
fragment=dict(type='str'),
set_counters=dict(type='str'),
source_port=dict(type='str'),
destination_port=dict(type='str'),
destination_ports=dict(type='list', elements='str', default=[]),
to_ports=dict(type='str'),
set_dscp_mark=dict(type='str'),
set_dscp_mark_class=dict(type='str'),
comment=dict(type='str'),
ctstate=dict(type='list', elements='str', default=[]),
src_range=dict(type='str'),
dst_range=dict(type='str'),
match_set=dict(type='str'),
match_set_flags=dict(
type='str',
choices=['src', 'dst', 'src,dst', 'dst,src', 'src,src', 'dst,dst']
),
limit=dict(type='str'),
limit_burst=dict(type='str'),
uid_owner=dict(type='str'),
gid_owner=dict(type='str'),
reject_with=dict(type='str'),
icmp_type=dict(type='str'),
syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
flush=dict(type='bool', default=False),
policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
chain_management=dict(type='bool', default=False),
numeric=dict(type='bool', default=False),
),
mutually_exclusive=(
['set_dscp_mark', 'set_dscp_mark_class'],
['flush', 'policy'],
),
required_by=dict(
set_dscp_mark=('jump',),
set_dscp_mark_class=('jump',),
),
required_if=[
['jump', 'TEE', ['gateway']],
['jump', 'tee', ['gateway']],
['flush', False, ['chain']],
]
)
args = dict(
changed=False,
failed=False,
ip_version=module.params['ip_version'],
table=module.params['table'],
chain=module.params['chain'],
flush=module.params['flush'],
rule=' '.join(construct_rule(module.params)),
state=module.params['state'],
chain_management=module.params['chain_management'],
)
ip_version = module.params['ip_version']
iptables_path = module.get_bin_path(BINS[ip_version], True)
if module.params.get('log_prefix', None) or module.params.get('log_level', None):
if module.params['jump'] is None:
module.params['jump'] = 'LOG'
elif module.params['jump'] != 'LOG':
module.fail_json(msg="Logging options can only be used with the LOG jump target.")
# Check if wait option is supported
iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
module.params['wait'] = ''
else:
module.params['wait'] = None
# Flush the table
if args['flush'] is True:
args['changed'] = True
if not module.check_mode:
flush_table(iptables_path, module, module.params)
# Set the policy
elif module.params['policy']:
current_policy = get_chain_policy(iptables_path, module, module.params)
if not current_policy:
module.fail_json(msg='Can\'t detect current policy')
changed = current_policy != module.params['policy']
args['changed'] = changed
if changed and not module.check_mode:
set_chain_policy(iptables_path, module, module.params)
# Delete the chain if there is no rule in the arguments
elif (args['state'] == 'absent') and not args['rule']:
chain_is_present = check_chain_present(
iptables_path, module, module.params
)
args['changed'] = chain_is_present
if (chain_is_present and args['chain_management'] and not module.check_mode):
delete_chain(iptables_path, module, module.params)
else:
# Create the chain if there are no rule arguments
if (args['state'] == 'present') and not args['rule']:
chain_is_present = check_chain_present(
iptables_path, module, module.params
)
args['changed'] = not chain_is_present
if (not chain_is_present and args['chain_management'] and not module.check_mode):
create_chain(iptables_path, module, module.params)
else:
insert = (module.params['action'] == 'insert')
rule_is_present = check_rule_present(
iptables_path, module, module.params
)
should_be_present = (args['state'] == 'present')
# Check if target is up to date
args['changed'] = (rule_is_present != should_be_present)
if args['changed'] is False:
# Target is already up to date
module.exit_json(**args)
# Modify if not check_mode
if not module.check_mode:
if should_be_present:
if insert:
insert_rule(iptables_path, module, module.params)
else:
append_rule(iptables_path, module, module.params)
else:
remove_rule(iptables_path, module, module.params)
module.exit_json(**args)
if __name__ == '__main__':
main()
# File: ansible_ansible/lib/ansible/modules/apt_repository.py
# encoding: utf-8
# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
# Copyright: (c) 2013, Alexander Saltanov <asd@mokote.com>
# Copyright: (c) 2014, Rutger Spiertz <rutger@kumina.nl>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: apt_repository
short_description: Add and remove APT repositories
description:
    - Add or remove APT repositories in Ubuntu and Debian.
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: debian
notes:
- This module supports Debian Squeeze (version 6) as well as its successors and derivatives.
seealso:
- module: ansible.builtin.deb822_repository
options:
repo:
description:
- A source string for the repository.
type: str
required: true
state:
description:
- A source string state.
type: str
choices: [ absent, present ]
default: "present"
mode:
description:
- The octal mode for newly created files in C(sources.list.d).
            - Default is what the system uses (typically 0644).
type: raw
version_added: "1.6"
update_cache:
description:
- Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
type: bool
default: "yes"
aliases: [ update-cache ]
update_cache_retries:
description:
            - Number of retries if the cache update fails. Also see O(update_cache_retry_max_delay).
type: int
default: 5
version_added: '2.10'
update_cache_retry_max_delay:
description:
- Use an exponential backoff delay for each retry (see O(update_cache_retries)) up to this max delay in seconds.
type: int
default: 12
version_added: '2.10'
validate_certs:
description:
- If V(false), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: '1.8'
filename:
description:
- Sets the name of the source list file in C(sources.list.d).
Defaults to a file name based on the repository source url.
The C(.list) extension will be automatically added.
type: str
version_added: '2.1'
codename:
description:
- Override the distribution codename to use for PPA repositories.
Should usually only be set when working with a PPA on
a non-Ubuntu target (for example, Debian or Mint).
type: str
version_added: '2.3'
install_python_apt:
description:
- Whether to automatically try to install the Python apt library or not, if it is not already installed.
Without this library, the module does not work.
- Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
- Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
the system Python, set O(install_python_apt=false) and ensure that the Python apt library
for your Python version is installed some other way.
type: bool
default: true
author:
- Alexander Saltanov (@sashka)
version_added: "0.7"
requirements:
- python-apt (python 2)
- python3-apt (python 3)
- apt-key or gpg
"""
EXAMPLES = """
- name: Add specified repository into sources list
ansible.builtin.apt_repository:
repo: deb http://archive.canonical.com/ubuntu hardy partner
state: present
- name: Add specified repository into sources list using specified filename
ansible.builtin.apt_repository:
repo: deb http://dl.google.com/linux/chrome/deb/ stable main
state: present
filename: google-chrome
- name: Add source repository into sources list
ansible.builtin.apt_repository:
repo: deb-src http://archive.canonical.com/ubuntu hardy partner
state: present
- name: Remove specified repository from sources list
ansible.builtin.apt_repository:
repo: deb http://archive.canonical.com/ubuntu hardy partner
state: absent
- name: Add nginx stable repository from PPA and install its signing key on Ubuntu target
ansible.builtin.apt_repository:
repo: ppa:nginx/stable
- name: Add nginx stable repository from PPA and install its signing key on Debian target
ansible.builtin.apt_repository:
repo: 'ppa:nginx/stable'
codename: trusty
- name: One way to avoid apt_key once it is removed from your distro
block:
    - name: somerepo | no apt key
ansible.builtin.get_url:
url: https://download.example.com/linux/ubuntu/gpg
dest: /etc/apt/keyrings/somerepo.asc
- name: somerepo | apt source
ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/myrepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
state: present
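# Hypothetical example (not from the upstream docs): update_cache defaults to
# true, so set it to false to skip the apt-get update that normally follows a
# change.
- name: Add specified repository without refreshing the package cache
  ansible.builtin.apt_repository:
    repo: deb http://archive.canonical.com/ubuntu hardy partner
    state: present
    update_cache: false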
"""
RETURN = """
repo:
description: A source string for the repository
returned: always
type: str
sample: "deb https://artifacts.elastic.co/packages/6.x/apt stable main"
sources_added:
description: List of sources added
returned: success, sources were added
type: list
sample: ["/etc/apt/sources.list.d/artifacts_elastic_co_packages_6_x_apt.list"]
version_added: "2.15"
sources_removed:
description: List of sources removed
returned: success, sources were removed
type: list
sample: ["/etc/apt/sources.list.d/artifacts_elastic_co_packages_6_x_apt.list"]
version_added: "2.15"
"""
import copy
import glob
import json
import os
import re
import secrets
import sys
import tempfile
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.file import S_IRWU_RG_RO as DEFAULT_SOURCES_PERM
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.common.locale import get_best_parsable_locale
try:
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
except ImportError:
apt = apt_pkg = aptsources_distro = distro = None
HAVE_PYTHON_APT = False
APT_KEY_DIRS = ['/etc/apt/keyrings', '/etc/apt/trusted.gpg.d', '/usr/share/keyrings']
VALID_SOURCE_TYPES = ('deb', 'deb-src')
def install_python_apt(module, apt_pkg_name):
if not module.check_mode:
apt_get_path = module.get_bin_path('apt-get')
if apt_get_path:
rc, so, se = module.run_command([apt_get_path, 'update'])
if rc != 0:
module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
rc, so, se = module.run_command([apt_get_path, 'install', apt_pkg_name, '-y', '-q'])
if rc != 0:
module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
else:
module.fail_json(msg="%s must be installed to use check mode" % apt_pkg_name)
class InvalidSource(Exception):
pass
# Simple version of aptsources.sourceslist.SourcesList.
# No advanced logic and no backups inside.
class SourcesList(object):
def __init__(self, module):
self.module = module
self.files = {} # group sources by file
self.files_mapping = {} # internal DS for tracking symlinks
# Repositories that we're adding -- used to implement mode param
self.new_repos = set()
self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
# read sources.list if it exists
if os.path.isfile(self.default_file):
self.load(self.default_file)
# read sources.list.d
for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
if os.path.islink(file):
self.files_mapping[file] = os.readlink(file)
self.load(file)
def __iter__(self):
"""Simple iterator to go over all sources. Empty, non-source, and other not valid lines will be skipped."""
for file, sources in self.files.items():
for n, valid, enabled, source, comment in sources:
if valid:
yield file, n, enabled, source, comment
def _expand_path(self, filename):
if '/' in filename:
return filename
else:
return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
def _suggest_filename(self, line):
def _cleanup_filename(s):
filename = self.module.params['filename']
if filename is not None:
return filename
return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
def _strip_username_password(s):
if '@' in s:
s = s.split('@', 1)
s = s[-1]
return s
# Drop options and protocols.
line = re.sub(r'\[[^\]]+\]', '', line)
line = re.sub(r'\w+://', '', line)
# split line into valid keywords
parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
# Drop usernames and passwords
parts[0] = _strip_username_password(parts[0])
return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
def _parse(self, line, raise_if_invalid_or_disabled=False):
valid = False
enabled = True
source = ''
comment = ''
line = line.strip()
if line.startswith('#'):
enabled = False
line = line[1:]
# Check for another "#" in the line and treat a part after it as a comment.
i = line.find('#')
if i > 0:
comment = line[i + 1:].strip()
line = line[:i]
# Split a source into substring to make sure that it is source spec.
# Duplicated whitespaces in a valid source spec will be removed.
source = line.strip()
if source:
chunks = source.split()
if chunks[0] in VALID_SOURCE_TYPES:
valid = True
source = ' '.join(chunks)
if raise_if_invalid_or_disabled and (not valid or not enabled):
raise InvalidSource(line)
return valid, enabled, source, comment
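    # Illustrative sketch (not part of the original module): what _parse()
    # returns for a disabled source line that carries a trailing comment.
    # Nothing calls this; it only documents the tuple shape.
    def _parse_demo(self):
        line = '# deb http://example.com/debian stable main # vendor repo'
        valid, enabled, source, comment = self._parse(line)
        # -> (True, False, 'deb http://example.com/debian stable main', 'vendor repo')
        return valid, enabled, source, comment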
@staticmethod
def _apt_cfg_file(filespec):
"""
Wrapper for `apt_pkg` module for running with Python 2.5
"""
try:
result = apt_pkg.config.find_file(filespec)
except AttributeError:
result = apt_pkg.Config.FindFile(filespec)
return result
@staticmethod
def _apt_cfg_dir(dirspec):
"""
Wrapper for `apt_pkg` module for running with Python 2.5
"""
try:
result = apt_pkg.config.find_dir(dirspec)
except AttributeError:
result = apt_pkg.Config.FindDir(dirspec)
return result
def load(self, file):
group = []
f = open(file, 'r')
for n, line in enumerate(f):
valid, enabled, source, comment = self._parse(line)
group.append((n, valid, enabled, source, comment))
self.files[file] = group
def save(self):
for filename, sources in list(self.files.items()):
if sources:
d, fn = os.path.split(filename)
try:
os.makedirs(d)
except OSError as ex:
if not os.path.isdir(d):
self.module.fail_json("Failed to create directory %s: %s" % (d, to_native(ex)))
try:
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
except (OSError, IOError) as e:
self.module.fail_json(msg='Unable to create temp file at "%s" for apt source: %s' % (d, to_native(e)))
f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
line = ''.join(chunks)
try:
f.write(line)
except IOError as ex:
self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(ex)))
if filename in self.files_mapping:
# Write to symlink target instead of replacing symlink as a normal file
self.module.atomic_move(tmp_path, self.files_mapping[filename])
else:
self.module.atomic_move(tmp_path, filename)
# allow the user to override the default mode
if filename in self.new_repos:
this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM)
self.module.set_mode_if_different(filename, this_mode, False)
else:
del self.files[filename]
if os.path.exists(filename):
os.remove(filename)
def dump(self):
dumpstruct = {}
for filename, sources in self.files.items():
if sources:
lines = []
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
lines.append(''.join(chunks))
dumpstruct[filename] = ''.join(lines)
return dumpstruct
def _choice(self, new, old):
if new is None:
return old
return new
def modify(self, file, n, enabled=None, source=None, comment=None):
"""
        This function is meant to be used with the iterator, so we don't care about invalid sources.
If source, enabled, or comment is None, original value from line ``n`` will be preserved.
"""
valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
def _add_valid_source(self, source_new, comment_new, file):
# We'll try to reuse disabled source if we have it.
# If we have more than one entry, we will enable them all - no advanced logic, remember.
self.module.log('adding source file: %s | %s | %s' % (source_new, comment_new, file))
found = False
for filename, n, enabled, source, comment in self:
if source == source_new:
self.modify(filename, n, enabled=True)
found = True
if not found:
if file is None:
file = self.default_file
else:
file = self._expand_path(file)
if file not in self.files:
self.files[file] = []
files = self.files[file]
files.append((len(files), True, True, source_new, comment_new))
self.new_repos.add(file)
def add_source(self, line, comment='', file=None):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
# Prefer separate files for new sources.
self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
def _remove_valid_source(self, source):
# If we have more than one entry, we will remove them all (not comment, remove!)
for filename, n, enabled, src, comment in self:
if source == src and enabled:
self.files[filename].pop(n)
def remove_source(self, line):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
class UbuntuSourcesList(SourcesList):
# prefer api.launchpad.net over launchpad.net/api
# see: https://github.com/ansible/ansible/pull/81978#issuecomment-1767062178
LP_API = 'https://api.launchpad.net/1.0/~%s/+archive/%s'
PPA_URI = 'https://ppa.launchpadcontent.net'
def __init__(self, module):
self.module = module
self.codename = module.params['codename'] or distro.codename
super(UbuntuSourcesList, self).__init__(module)
self.apt_key_bin = self.module.get_bin_path('apt-key', required=False)
self.gpg_bin = self.module.get_bin_path('gpg', required=False)
if not self.apt_key_bin and not self.gpg_bin:
self.module.fail_json(msg='Either apt-key or gpg binary is required, but neither could be found')
def __deepcopy__(self, memo=None):
return UbuntuSourcesList(self.module)
def _get_ppa_info(self, owner_name, ppa_name):
lp_api = self.LP_API % (owner_name, ppa_name)
headers = dict(Accept='application/json')
response, info = fetch_url(self.module, lp_api, headers=headers)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
return json.loads(to_native(response.read()))
def _expand_ppa(self, path):
ppa = path.split(':')[1]
ppa_owner = ppa.split('/')[0]
try:
ppa_name = ppa.split('/')[1]
except IndexError:
ppa_name = 'ppa'
line = 'deb %s/%s/%s/ubuntu %s main' % (self.PPA_URI, ppa_owner, ppa_name, self.codename)
return line, ppa_owner, ppa_name
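    # Illustrative sketch (not part of the original module): with
    # codename='jammy', _expand_ppa('ppa:nginx/stable') returns
    # ('deb https://ppa.launchpadcontent.net/nginx/stable/ubuntu jammy main',
    #  'nginx', 'stable').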
def _key_already_exists(self, key_fingerprint):
if self.apt_key_bin:
locale = get_best_parsable_locale(self.module)
APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale)
self.module.run_command_environ_update = APT_ENV
rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True)
found = bool(not err or 'nothing exported' not in err)
else:
found = self._gpg_key_exists(key_fingerprint)
return found
def _gpg_key_exists(self, key_fingerprint):
found = False
keyfiles = ['/etc/apt/trusted.gpg'] # main gpg repo for apt
for other_dir in APT_KEY_DIRS:
# add other known sources of gpg sigs for apt, skip hidden files
keyfiles.extend([os.path.join(other_dir, x) for x in os.listdir(other_dir) if not x.startswith('.')])
for key_file in keyfiles:
if os.path.exists(key_file):
try:
rc, out, err = self.module.run_command([self.gpg_bin, '--list-packets', key_file])
except (IOError, OSError) as e:
self.debug("Could check key against file %s: %s" % (key_file, to_native(e)))
continue
if key_fingerprint in out:
found = True
break
return found
# https://www.linuxuprising.com/2021/01/apt-key-is-deprecated-how-to-add.html
def add_source(self, line, comment='', file=None):
if line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(line)
if source in self.repos_urls:
# repository already exists
return
info = self._get_ppa_info(ppa_owner, ppa_name)
# add gpg sig if needed
if not self._key_already_exists(info['signing_key_fingerprint']):
# TODO: report file that would have been added if not check_mode
keyfile = ''
if not self.module.check_mode:
if self.apt_key_bin:
command = [self.apt_key_bin, 'adv', '--recv-keys', '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80',
info['signing_key_fingerprint']]
else:
# use first available key dir, in order of preference
for keydir in APT_KEY_DIRS:
if os.path.exists(keydir):
break
else:
self.module.fail_json("Unable to find any existing apt gpgp repo directories, tried the following: %s" % ', '.join(APT_KEY_DIRS))
keyfile = '%s/%s-%s-%s.gpg' % (keydir, os.path.basename(source).replace(' ', '-'), ppa_owner, ppa_name)
command = [self.gpg_bin, '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--export', info['signing_key_fingerprint']]
rc, stdout, stderr = self.module.run_command(command, check_rc=True, encoding=None)
if keyfile:
# using gpg we must write keyfile ourselves
if len(stdout) == 0:
self.module.fail_json(msg='Unable to get required signing key', rc=rc, stderr=stderr, command=command)
try:
with open(keyfile, 'wb') as f:
f.write(stdout)
self.module.log('Added repo key "%s" for apt to file "%s"' % (info['signing_key_fingerprint'], keyfile))
except (OSError, IOError) as e:
                            self.module.fail_json(msg='Unable to add required signing key to %s' % keyfile, rc=rc, stderr=stderr, error=to_native(e))
# apt source file
file = file or self._suggest_filename('%s_%s' % (line, self.codename))
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
file = file or self._suggest_filename(source)
self._add_valid_source(source, comment, file)
def remove_source(self, line):
if line.startswith('ppa:'):
source = self._expand_ppa(line)[0]
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
@property
def repos_urls(self):
_repositories = []
for parsed_repos in self.files.values():
for parsed_repo in parsed_repos:
valid = parsed_repo[1]
enabled = parsed_repo[2]
source_line = parsed_repo[3]
if not valid or not enabled:
continue
if source_line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(source_line)
_repositories.append(source)
else:
_repositories.append(source_line)
return _repositories
def revert_sources_list(sources_before, sources_after, sourceslist_before):
"""Revert the sourcelist files to their previous state."""
# First remove any new files that were created:
for filename in set(sources_after.keys()).difference(sources_before.keys()):
if os.path.exists(filename):
os.remove(filename)
# Now revert the existing files to their former state:
sourceslist_before.save()
def main():
module = AnsibleModule(
argument_spec=dict(
repo=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
mode=dict(type='raw'),
update_cache=dict(type='bool', default=True, aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
filename=dict(type='str'),
# This should not be needed, but exists as a failsafe
install_python_apt=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
codename=dict(type='str'),
),
supports_check_mode=True,
)
params = module.params
repo = module.params['repo']
state = module.params['state']
update_cache = module.params['update_cache']
# Note: mode is referenced in SourcesList class via the passed in module (self here)
sourceslist = None
if not HAVE_PYTHON_APT:
# This interpreter can't see the apt Python library- we'll do the following to try and fix that:
# 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
# 2) finding none, try to install a matching python-apt package for the current interpreter version;
# we limit to the current interpreter version to try and avoid installing a whole other Python just
# for apt support
# 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
# the current interpreter again, but we'll let it respawn anyway for simplicity)
# 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
# made any more complex than it already is to try and cover more, eg, custom interpreters taking over
# system locations)
apt_pkg_name = 'python3-apt'
if has_respawned():
# this shouldn't be possible; short-circuit early if it happens...
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
interpreters = ['/usr/bin/python3', '/usr/bin/python']
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
# don't make changes if we're in check_mode; installing the bindings would itself be a change, so fail instead
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % apt_pkg_name)
if params['install_python_apt']:
install_python_apt(module, apt_pkg_name)
else:
module.fail_json(msg='%s is not installed, and install_python_apt is False' % apt_pkg_name)
# try again to find the bindings in common places
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
# NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
else:
# we've done all we can do; just tell the user it's busted and get out
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
if not repo:
module.fail_json(msg='Please set argument \'repo\' to a non-empty value')
if isinstance(distro, aptsources_distro.Distribution):
sourceslist = UbuntuSourcesList(module)
else:
module.fail_json(msg='Module apt_repository is not supported on target.')
sourceslist_before = copy.deepcopy(sourceslist)
sources_before = sourceslist.dump()
try:
if state == 'present':
sourceslist.add_source(repo)
elif state == 'absent':
sourceslist.remove_source(repo)
except InvalidSource as ex:
module.fail_json(msg='Invalid repository string: %s' % to_native(ex))
sources_after = sourceslist.dump()
changed = sources_before != sources_after
diff = []
sources_added = set()
sources_removed = set()
if changed:
sources_added = set(sources_after.keys()).difference(sources_before.keys())
sources_removed = set(sources_before.keys()).difference(sources_after.keys())
if module._diff:
for filename in set(sources_added.union(sources_removed)):
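# (filename, '/dev/null')[cond] evaluates to '/dev/null' when cond is True, i.e. the header
# falls back to /dev/null for files that do not exist on that side of the diff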
diff.append({'before': sources_before.get(filename, ''),
'after': sources_after.get(filename, ''),
'before_header': (filename, '/dev/null')[filename not in sources_before],
'after_header': (filename, '/dev/null')[filename not in sources_after]})
if changed and not module.check_mode:
try:
err = ''
sourceslist.save()
if update_cache:
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
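# jitter in [0, 1): a random fraction of a second added to every backoff delay to stagger concurrent retries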
randomize = secrets.randbelow(1000) / 1000.0
cache = apt.Cache()
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as fetch_failed_exc:
err = fetch_failed_exc
module.warn(
f"Failed to update cache on attempt {retry + 1} due "
f"to {to_native(fetch_failed_exc)}, retrying"
)
# Use exponential backoff with a max fail count, plus a little bit of randomness
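# e.g. with the default update_cache_retry_max_delay of 12, the delays run roughly 1s, 2s, 4s, 8s, then cap at ~12s (each plus the jitter above)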
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to update the cache again")
else:
revert_sources_list(sources_before, sources_after, sourceslist_before)
msg = (
f"Failed to update apt cache after {update_cache_retries} retries: "
f"{err if err else 'unknown reason'}"
)
module.fail_json(msg=msg)
except (OSError, IOError) as ex:
revert_sources_list(sources_before, sources_after, sourceslist_before)
module.fail_json(msg=to_native(ex))
module.exit_json(changed=changed, repo=repo, sources_added=sources_added, sources_removed=sources_removed, state=state, diff=diff)
if __name__ == '__main__':
main()
| 31,330 | Python | .py | 653 | 36.967841 | 157 | 0.601742 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,179 | service.py | ansible_ansible/lib/ansible/modules/service.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: service
version_added: "0.1"
short_description: Manage services
description:
- Controls services on remote hosts. Supported init systems include BSD init,
OpenRC, SysV, Solaris SMF, systemd, upstart.
- This module acts as a proxy to the underlying service manager module. While all arguments will be passed to the
underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
of module arguments that all service manager modules support.
- This module is a proxy for multiple more specific service manager modules
(such as M(ansible.builtin.systemd) and M(ansible.builtin.sysvinit)).
This allows management of a heterogeneous environment of machines without creating a specific task for
each service manager. The module to be executed is determined by the O(use) option, which defaults to the
service manager discovered by M(ansible.builtin.setup). If M(ansible.builtin.setup) was not yet run, this module may run it.
- For Windows targets, use the M(ansible.windows.win_service) module instead.
options:
name:
description:
- Name of the service.
type: str
required: true
state:
description:
- V(started)/V(stopped) are idempotent actions that will not run
commands unless necessary.
- V(restarted) will always bounce the service.
- V(reloaded) will always reload.
- At least one of O(state) and O(enabled) is required.
- Note that V(reloaded) will start the service if it is not already started,
even if your chosen init system wouldn't normally.
type: str
choices: [ reloaded, restarted, started, stopped ]
sleep:
description:
- If the service is being V(restarted) then sleep this many seconds
between the stop and start command.
- This helps to work around badly-behaving init scripts that exit immediately
after signaling a process to stop.
- Not all service managers support sleep; for example, when using systemd this setting will be ignored.
type: int
version_added: "1.3"
pattern:
description:
- If the service does not respond to the status command, name a
substring to look for as would be found in the output of the C(ps)
command as a stand-in for a status result.
- If the string is found, the service will be assumed to be started.
- When the target host uses systemd, this setting will be ignored.
type: str
version_added: "0.7"
enabled:
description:
- Whether the service should start on boot.
- At least one of O(state) and O(enabled) is required.
type: bool
runlevel:
description:
- For OpenRC init scripts (e.g. Gentoo) only.
- The runlevel that this service belongs to.
- When the target host uses systemd, this setting will be ignored.
type: str
default: default
arguments:
description:
- Additional arguments provided on the command line.
- When the target host uses systemd, this setting will be ignored.
type: str
default: ''
aliases: [ args ]
use:
description:
- The service module actually uses system-specific modules, normally selected through auto-detection; this setting can force a specific module.
- Normally it uses the value of the C(ansible_service_mgr) fact and falls back to the C(ansible.legacy.service) module when no matching one is found.
- The 'old service module' still uses autodetection and in no way does it correspond to the C(service) command.
type: str
default: auto
version_added: 2.2
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: full
bypass_host_loop:
support: none
check_mode:
details: support depends on the underlying plugin invoked
support: N/A
diff_mode:
details: support depends on the underlying plugin invoked
support: N/A
platform:
details: The support depends on the availability for the specific plugin for each platform and if fact gathering is able to detect it
platforms: all
notes:
- For AIX, group subsystem names can be used.
- The C(service) command line utility is not part of any service manager system but a convenience.
It does not have a standard implementation across systems, and this action cannot use it directly.
Though it might be used if found in certain circumstances, the detected system service manager is normally preferred.
seealso:
- module: ansible.windows.win_service
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
- name: Start service httpd, if not started
ansible.builtin.service:
name: httpd
state: started
- name: Stop service httpd, if started
ansible.builtin.service:
name: httpd
state: stopped
- name: Restart service httpd, in all cases
ansible.builtin.service:
name: httpd
state: restarted
- name: Reload service httpd, in all cases
ansible.builtin.service:
name: httpd
state: reloaded
- name: Enable service httpd, and not touch the state
ansible.builtin.service:
name: httpd
enabled: yes
- name: Start service foo, based on running process /usr/bin/foo
ansible.builtin.service:
name: foo
pattern: /usr/bin/foo
state: started
- name: Restart network service for interface eth0
ansible.builtin.service:
name: network
state: restarted
args: eth0
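# Illustrative addition (not in the original examples): O(use) can pin the
# underlying service manager module instead of relying on auto-detection.
- name: Restart service httpd, explicitly through the systemd module
ansible.builtin.service:
name: httpd
state: restarted
use: ansible.builtin.systemd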
"""
RETURN = r"""#"""
import glob
import json
import os
import platform
import re
import select
import shlex
import subprocess
import tempfile
import time
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils.service import fail_if_missing, is_systemd_managed
from ansible.module_utils.six import PY2, b
class Service(object):
"""
This is the generic Service manipulation class that is subclassed
based on platform.
A subclass should override the following action methods:
- get_service_tools
- service_enable
- get_service_status
- service_control
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(Service)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.state = module.params['state']
self.sleep = module.params['sleep']
self.pattern = module.params['pattern']
self.enable = module.params['enabled']
self.runlevel = module.params['runlevel']
self.changed = False
self.running = None
self.crashed = None
self.action = None
self.svc_cmd = None
self.svc_initscript = None
self.svc_initctl = None
self.enable_cmd = None
self.arguments = module.params.get('arguments', '')
self.rcconf_file = None
self.rcconf_key = None
self.rcconf_value = None
self.svc_change = False
# ===========================================
# Platform specific methods (must be replaced by subclass).
def get_service_tools(self):
self.module.fail_json(msg="get_service_tools not implemented on target platform")
def service_enable(self):
self.module.fail_json(msg="service_enable not implemented on target platform")
def get_service_status(self):
self.module.fail_json(msg="get_service_status not implemented on target platform")
def service_control(self):
self.module.fail_json(msg="service_control not implemented on target platform")
# ===========================================
# Generic methods that should be used on all platforms.
def execute_command(self, cmd, daemonize=False):
locale = get_best_parsable_locale(self.module)
lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
# Most things don't need to be daemonized
if not daemonize:
# chkconfig localizes messages and we're screen scraping so make
# sure we use the C locale
return self.module.run_command(cmd, environ_update=lang_env)
# This is complex because daemonization is hard for people.
# What we do is daemonize a part of this module, the daemon runs the
# command, picks up the return code and output, and returns it to the
# main process.
pipe = os.pipe()
pid = os.fork()
if pid == 0:
os.close(pipe[0])
# Set stdin/stdout/stderr to /dev/null
fd = os.open(os.devnull, os.O_RDWR)
if fd != 0:
os.dup2(fd, 0)
if fd != 1:
os.dup2(fd, 1)
if fd != 2:
os.dup2(fd, 2)
if fd not in (0, 1, 2):
os.close(fd)
# Make us a daemon. Yes, that's all it takes.
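# (fork/setsid/fork: the first fork lets setsid create a new session without a controlling
# terminal; the second fork ensures the daemon is not a session leader and can never reacquire one)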
pid = os.fork()
if pid > 0:
os._exit(0)
os.setsid()
os.chdir("/")
pid = os.fork()
if pid > 0:
os._exit(0)
# Start the command
if PY2:
# Python 2.6's shlex.split can't handle text strings correctly
cmd = to_bytes(cmd, errors='surrogate_or_strict')
cmd = shlex.split(cmd)
else:
# Python 3.x shlex.split handles text strings natively.
cmd = to_text(cmd, errors='surrogate_or_strict')
cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
# In either of the above cases, pass a list of byte strings to Popen
# chkconfig localizes messages and we're screen scraping so make
# sure we use the C locale
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=lang_env, preexec_fn=lambda: os.close(pipe[1]))
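# preexec_fn closes our pipe's write end inside the spawned process, so the only remaining
# writer is this daemonized intermediary; the parent's read end then sees EOF once we exit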
stdout = b("")
stderr = b("")
fds = [p.stdout, p.stderr]
# Wait for all output, or until the main process is dead and its output is done.
while fds:
rfd, wfd, efd = select.select(fds, [], fds, 1)
if not (rfd + wfd + efd) and p.poll() is not None:
break
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), 4096)
if not dat:
fds.remove(p.stdout)
stdout += dat
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), 4096)
if not dat:
fds.remove(p.stderr)
stderr += dat
p.wait()
# Return a JSON blob to parent
blob = json.dumps([p.returncode, to_text(stdout), to_text(stderr)])
os.write(pipe[1], to_bytes(blob, errors='surrogate_or_strict'))
os.close(pipe[1])
os._exit(0)
elif pid == -1:
self.module.fail_json(msg="unable to fork")
else:
os.close(pipe[1])
os.waitpid(pid, 0)
# Wait for data from daemon process and process it.
data = b("")
while True:
rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
if pipe[0] in rfd:
dat = os.read(pipe[0], 4096)
if not dat:
break
data += dat
return json.loads(to_text(data, errors='surrogate_or_strict'))
def check_ps(self):
# Set ps flags
if platform.system() == 'SunOS':
psflags = '-ef'
else:
psflags = 'auxww'
# Find ps binary
psbin = self.module.get_bin_path('ps', True)
(rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags))
# If rc is 0, set running as appropriate
if rc == 0:
self.running = False
lines = psout.split("\n")
for line in lines:
if self.pattern in line and "pattern=" not in line:
# so as to not confuse ./hacking/test-module.py
self.running = True
break
def check_service_changed(self):
if self.state and self.running is None:
self.module.fail_json(msg="failed determining service state, possible typo of service name?")
# Find out if state has changed
if not self.running and self.state in ["reloaded", "started"]:
self.svc_change = True
elif self.running and self.state in ["reloaded", "stopped"]:
self.svc_change = True
elif self.state == "restarted":
self.svc_change = True
if self.module.check_mode and self.svc_change:
self.module.exit_json(changed=True, msg='service state changed')
def modify_service_state(self):
# Only do something if state will change
if self.svc_change:
# Control service
if self.state in ['started']:
self.action = "start"
elif not self.running and self.state == 'reloaded':
self.action = "start"
elif self.state == 'stopped':
self.action = "stop"
elif self.state == 'reloaded':
self.action = "reload"
elif self.state == 'restarted':
self.action = "restart"
if self.module.check_mode:
self.module.exit_json(changed=True, msg='changing service state')
return self.service_control()
else:
# If nothing needs to change just say all is well
rc = 0
err = ''
out = ''
return rc, out, err
def service_enable_rcconf(self):
if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None:
self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value")
self.changed = None
entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value)
with open(self.rcconf_file, "r") as RCFILE:
new_rc_conf = []
# Build a list containing the possibly modified file.
for rcline in RCFILE:
# Parse line removing whitespaces, quotes, etc.
rcarray = shlex.split(rcline, comments=True)
if len(rcarray) >= 1 and '=' in rcarray[0]:
(key, value) = rcarray[0].split("=", 1)
if key == self.rcconf_key:
if value.upper() == self.rcconf_value:
# Since the proper entry already exists we can stop iterating.
self.changed = False
break
else:
# We found the key but the value is wrong, replace with new entry.
rcline = entry
self.changed = True
# Add line to the list.
new_rc_conf.append(rcline.strip() + '\n')
# If we did not see any trace of our entry we need to add it.
if self.changed is None:
new_rc_conf.append(entry)
self.changed = True
if self.changed is True:
if self.module.check_mode:
self.module.exit_json(changed=True, msg="changing service enablement")
# Create a temporary file next to the current rc.conf (so we stay on the same filesystem).
# This way the replacement operation is atomic.
rcconf_dir = os.path.dirname(self.rcconf_file)
rcconf_base = os.path.basename(self.rcconf_file)
(TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base)
# Write out the contents of the list into our temporary file.
for rcline in new_rc_conf:
os.write(TMP_RCCONF, rcline.encode())
# Close temporary file.
os.close(TMP_RCCONF)
# Replace previous rc.conf.
self.module.atomic_move(tmp_rcconf_file, self.rcconf_file)
class LinuxService(Service):
"""
This is the Linux Service manipulation class - it is currently supporting
a mixture of binaries and init scripts for controlling services started at
boot, as well as for controlling the current state.
"""
platform = 'Linux'
distribution = None
def get_service_tools(self):
paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
binaries = ['service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv']
initpaths = ['/etc/init.d']
location = dict()
for binary in binaries:
location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
for initdir in initpaths:
initscript = "%s/%s" % (initdir, self.name)
if os.path.isfile(initscript):
self.svc_initscript = initscript
def check_systemd():
# tools must be installed
if location.get('systemctl', False):
return is_systemd_managed(self.module)
return False
# Locate a tool to enable/disable a service
if check_systemd():
# service is managed by systemd
self.__systemd_unit = self.name
self.svc_cmd = location['systemctl']
self.enable_cmd = location['systemctl']
elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name):
# service is managed by upstart
self.enable_cmd = location['initctl']
# set the upstart version based on the output of 'initctl version'
self.upstart_version = LooseVersion('0.0.0')
try:
version_re = re.compile(r'\(upstart (.*)\)')
rc, stdout, stderr = self.module.run_command('%s version' % location['initctl'])
if rc == 0:
res = version_re.search(stdout)
if res:
self.upstart_version = LooseVersion(res.groups()[0])
except Exception:
pass # we'll use the default of 0.0.0
self.svc_cmd = location['initctl']
elif location.get('rc-service', False):
# service is managed by OpenRC
self.svc_cmd = location['rc-service']
self.enable_cmd = location['rc-update']
return # already have service start/stop tool too!
elif self.svc_initscript:
# service is managed with SysV init scripts
if location.get('update-rc.d', False):
# and uses update-rc.d
self.enable_cmd = location['update-rc.d']
elif location.get('insserv', None):
# and uses insserv
self.enable_cmd = location['insserv']
elif location.get('chkconfig', False):
# and uses chkconfig
self.enable_cmd = location['chkconfig']
if self.enable_cmd is None:
fail_if_missing(self.module, False, self.name, msg='host')
# If no service control tool selected yet, try to see if 'service' is available
if self.svc_cmd is None and location.get('service', False):
self.svc_cmd = location['service']
# couldn't find anything yet
if self.svc_cmd is None and not self.svc_initscript:
self.module.fail_json(msg='cannot find \'service\' binary or init script for service; possible typo in service name? Aborting')
if location.get('initctl', False):
self.svc_initctl = location['initctl']
def get_systemd_service_enabled(self):
def sysv_exists(name):
script = '/etc/init.d/' + name
return os.access(script, os.X_OK)
def sysv_is_enabled(name):
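# an S?? start link in any /etc/rc?.d runlevel directory means the SysV service is enabled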
return bool(glob.glob('/etc/rc?.d/S??' + name))
service_name = self.__systemd_unit
(rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,))
if rc == 0:
return True
elif out.startswith('disabled'):
return False
elif sysv_exists(service_name):
return sysv_is_enabled(service_name)
else:
return False
def get_systemd_status_dict(self):
# Check status first as show will not fail if service does not exist
(rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,))
if rc != 0:
self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
elif 'LoadState=not-found' in out:
self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err))
key = None
value_buffer = []
status_dict = {}
for line in out.splitlines():
if '=' in line:
if not key:
key, value = line.split('=', 1)
# systemd fields that are shell commands can be multi-line
# We take a value that begins with a "{" as the start of
# a shell command and a line that ends with "}" as the end of
# the command
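# e.g. ExecStart={ path=/usr/sbin/foo ; argv[]=/usr/sbin/foo --serve ; ... } may span several lines of 'systemctl show' output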
if value.lstrip().startswith('{'):
if value.rstrip().endswith('}'):
status_dict[key] = value
key = None
else:
value_buffer.append(value)
else:
status_dict[key] = value
key = None
else:
if line.rstrip().endswith('}'):
status_dict[key] = '\n'.join(value_buffer)
key = None
else:
value_buffer.append(line)
else:
value_buffer.append(line)
return status_dict
def get_systemd_service_status(self):
d = self.get_systemd_status_dict()
if d.get('ActiveState') == 'active':
# run-once services (for which a single successful exit indicates
# that they are running as designed) should not be restarted here.
# Thus, we are not checking d['SubState'].
self.running = True
self.crashed = False
elif d.get('ActiveState') == 'failed':
self.running = False
self.crashed = True
elif d.get('ActiveState') is None:
self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
else:
self.running = False
self.crashed = False
return self.running
def get_service_status(self):
if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
return self.get_systemd_service_status()
self.action = "status"
rc, status_stdout, status_stderr = self.service_control()
# if we have decided the service is managed by upstart, we check for some additional output...
if self.svc_initctl and self.running is None:
# check the job status by upstart response
initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s %s" % (self.svc_initctl, self.name, self.arguments))
if "stop/waiting" in initctl_status_stdout:
self.running = False
elif "start/running" in initctl_status_stdout:
self.running = True
if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name))
self.running = "started" in openrc_status_stdout
self.crashed = "crashed" in openrc_status_stderr
# if the job status is still not known, infer 'stopped' from an LSB-style non-zero return code. For reference, see:
# http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
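# (1-3 are LSB status codes for a dead/stopped service, 4 means status unknown; 69 is EX_UNAVAILABLE from sysexits.h, returned by some scripts)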
if self.running is None and rc in [1, 2, 3, 4, 69]:
self.running = False
# if the job status is still not known check it by status output keywords
# Only check keywords if there's only one line of output (some init
# scripts will output verbosely in case of error and those can emit
# keywords that are picked up as false positives)
if self.running is None and status_stdout.count('\n') <= 1:
# first transform the status output that could irritate keyword matching
cleanout = status_stdout.lower().replace(self.name.lower(), '')
if "stop" in cleanout:
self.running = False
elif "run" in cleanout:
self.running = not ("not " in cleanout)
elif "start" in cleanout and "not " not in cleanout:
self.running = True
elif 'could not access pid file' in cleanout:
self.running = False
elif 'is dead and pid file exists' in cleanout:
self.running = False
elif 'dead but subsys locked' in cleanout:
self.running = False
elif 'dead but pid file exists' in cleanout:
self.running = False
# if the job status is still not known and we got a zero for the
# return code, assume here that the service is running
if self.running is None and rc == 0:
self.running = True
# if the job status is still not known check it by special conditions
if self.running is None:
if self.name == 'iptables' and "ACCEPT" in status_stdout:
# iptables status command output is lame
# TODO: lookup if we can use a return code for this instead?
self.running = True
return self.running
def service_enable(self):
if self.enable_cmd is None:
self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name)
self.changed = True
action = None
#
# Upstart's initctl
#
if self.enable_cmd.endswith("initctl"):
def write_to_override_file(file_name, file_contents, ):
with open(file_name, 'w') as override_file:
override_file.write(file_contents)
initpath = '/etc/init'
if self.upstart_version >= LooseVersion('0.6.7'):
manreg = re.compile(r'^manual\s*$', re.M | re.I)
config_line = 'manual\n'
else:
manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
config_line = 'start on manual\n'
conf_file_name = "%s/%s.conf" % (initpath, self.name)
override_file_name = "%s/%s.override" % (initpath, self.name)
# Check to see if files contain the manual line in .conf and fail if True
with open(conf_file_name) as conf_file_fh:
conf_file_content = conf_file_fh.read()
if manreg.search(conf_file_content):
self.module.fail_json(msg="manual stanza not supported in a .conf file")
self.changed = False
if os.path.exists(override_file_name):
with open(override_file_name) as override_fh:
override_file_contents = override_fh.read()
# Remove manual stanza if present and service enabled
if self.enable and manreg.search(override_file_contents):
self.changed = True
override_state = manreg.sub('', override_file_contents)
# Add manual stanza if not present and service disabled
elif not (self.enable) and not (manreg.search(override_file_contents)):
self.changed = True
override_state = '\n'.join((override_file_contents, config_line))
# service already in desired state
else:
pass
# Add file with manual stanza if service disabled
elif not (self.enable):
self.changed = True
override_state = config_line
else:
# service already in desired state
pass
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
# The initctl method of enabling and disabling services is much
# different than for the other service methods. So actually
# committing the change is done in this conditional and then we
# skip the boilerplate at the bottom of the method
if self.changed:
try:
write_to_override_file(override_file_name, override_state)
except Exception:
self.module.fail_json(msg='Could not modify override file')
return
#
# SysV's chkconfig
#
if self.enable_cmd.endswith("chkconfig"):
if self.enable:
action = 'on'
else:
action = 'off'
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if 'chkconfig --add %s' % self.name in err:
self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if self.name not in out:
self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
# TODO: look back on why this is here
# state = out.split()[-1]
# Check if we're already in the correct state
if "3:%s" % action in out and "5:%s" % action in out:
self.changed = False
return
#
# Systemd's systemctl
#
if self.enable_cmd.endswith("systemctl"):
if self.enable:
action = 'enable'
else:
action = 'disable'
# Check if we're already in the correct state
service_enabled = self.get_systemd_service_enabled()
# self.changed should already be true
if self.enable == service_enabled:
self.changed = False
return
#
# OpenRC's rc-update
#
if self.enable_cmd.endswith("rc-update"):
if self.enable:
action = 'add'
else:
action = 'delete'
(rc, out, err) = self.execute_command("%s show" % self.enable_cmd)
for line in out.splitlines():
service_name, runlevels = line.split('|')
service_name = service_name.strip()
if service_name != self.name:
continue
runlevels = re.split(r'\s+', runlevels)
# service already enabled for the runlevel
if self.enable and self.runlevel in runlevels:
self.changed = False
# service already disabled for the runlevel
elif not self.enable and self.runlevel not in runlevels:
self.changed = False
break
else:
# service already disabled altogether
if not self.enable:
self.changed = False
if not self.changed:
return
#
# update-rc.d style
#
if self.enable_cmd.endswith("update-rc.d"):
enabled = False
slinks = glob.glob('/etc/rc?.d/S??' + self.name)
if slinks:
enabled = True
if self.enable != enabled:
self.changed = True
if self.enable:
action = 'enable'
klinks = glob.glob('/etc/rc?.d/K??' + self.name)
if not klinks:
if not self.module.check_mode:
(rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out)
else:
action = 'disable'
if not self.module.check_mode:
(rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out)
else:
self.changed = False
return
#
# insserv (Debian <=7, SLES, others)
#
if self.enable_cmd.endswith("insserv"):
if self.enable:
(rc, out, err) = self.execute_command("%s -n -v %s" % (self.enable_cmd, self.name))
else:
(rc, out, err) = self.execute_command("%s -n -r -v %s" % (self.enable_cmd, self.name))
self.changed = False
for line in err.splitlines():
if self.enable and line.find('enable service') != -1:
self.changed = True
break
if not self.enable and line.find('remove service') != -1:
self.changed = True
break
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
if not self.changed:
return
if self.enable:
(rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
else:
(rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
#
# If we've gotten to the end, the service needs to be updated
#
self.changed = True
# we change argument order depending on real binary used:
# rc-update and systemctl need the argument order reversed
if self.enable_cmd.endswith("rc-update"):
args = (self.enable_cmd, action, self.name + " " + self.runlevel)
elif self.enable_cmd.endswith("systemctl"):
args = (self.enable_cmd, action, self.__systemd_unit)
else:
args = (self.enable_cmd, self.name, action)
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
(rc, out, err) = self.execute_command("%s %s %s" % args)
if rc != 0:
if err:
self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err))
else:
self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out))
return (rc, out, err)
def service_control(self):
# Decide what command to run
svc_cmd = ''
arguments = self.arguments
if self.svc_cmd:
if not self.svc_cmd.endswith("systemctl"):
if self.svc_cmd.endswith("initctl"):
# initctl commands take the form <cmd> <action> <name>
svc_cmd = self.svc_cmd
arguments = "%s %s" % (self.name, arguments)
else:
# SysV and OpenRC take the form <cmd> <name> <action>
svc_cmd = "%s %s" % (self.svc_cmd, self.name)
else:
# systemd commands take the form <cmd> <action> <name>
svc_cmd = self.svc_cmd
arguments = "%s %s" % (self.__systemd_unit, arguments)
elif self.svc_cmd is None and self.svc_initscript:
# upstart
svc_cmd = "%s" % self.svc_initscript
# In OpenRC, if a service crashed, we need to reset its status to
# stopped with the zap command, before we can start it back.
if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
self.execute_command("%s zap" % svc_cmd, daemonize=True)
if self.action != "restart":
if svc_cmd != '':
# upstart or systemd or OpenRC
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
else:
# SysV
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True)
elif self.svc_cmd and self.svc_cmd.endswith('rc-service'):
# All services in OpenRC support restart.
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
else:
# In other systems, not all services support restart. Do it the hard way.
if svc_cmd != '':
# upstart or systemd
rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True)
else:
# SysV
rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True)
if self.sleep:
time.sleep(self.sleep)
if svc_cmd != '':
# upstart or systemd
rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True)
else:
# SysV
rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True)
# merge return information
if rc1 != 0 and rc2 == 0:
rc_state = rc2
stdout = stdout2
stderr = stderr2
else:
rc_state = rc1 + rc2
stdout = stdout1 + stdout2
stderr = stderr1 + stderr2
return (rc_state, stdout, stderr)
class FreeBsdService(Service):
"""
This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf
file for controlling services started at boot and the 'service' binary to
check status and perform direct service manipulation.
"""
platform = 'FreeBSD'
distribution = None
def get_service_tools(self):
self.svc_cmd = self.module.get_bin_path('service', True)
if not self.svc_cmd:
self.module.fail_json(msg='unable to find service binary')
self.sysrc_cmd = self.module.get_bin_path('sysrc')
def get_service_status(self):
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.arguments, self.name, 'onestatus'))
if self.name == "pf":
self.running = "Enabled" in stdout
else:
if rc == 1:
self.running = False
elif rc == 0:
self.running = True
def service_enable(self):
if self.enable:
self.rcconf_value = "YES"
else:
self.rcconf_value = "NO"
rcfiles = ['/etc/rc.conf', '/etc/rc.conf.local', '/usr/local/etc/rc.conf']
for rcfile in rcfiles:
if os.path.isfile(rcfile):
self.rcconf_file = rcfile
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.arguments, self.name, 'rcvar'))
try:
rcvars = shlex.split(stdout, comments=True)
except Exception:
# TODO: add a warning to the output with the failure
rcvars = []  # keep the name bound so the emptiness check below fails cleanly instead of raising NameError
if not rcvars:
self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
# In rare cases, i.e. sendmail, rcvar can return several key=value pairs
# Usually there is just one, however. In other rare cases, i.e. uwsgi,
# rcvar can return extra uncommented data that is not at all related to
# the rcvar. We will just take the first key=value pair we come across
# and hope for the best.
for rcvar in rcvars:
if '=' in rcvar:
self.rcconf_key, default_rcconf_value = rcvar.split('=', 1)
break
if self.rcconf_key is None:
self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
if self.sysrc_cmd: # FreeBSD >= 9.2
rc, current_rcconf_value, stderr = self.execute_command("%s -n %s" % (self.sysrc_cmd, self.rcconf_key))
# it can happen that rcvar is not set (case of a system coming from the ports collection)
# so we will fallback on the default
if rc != 0:
current_rcconf_value = default_rcconf_value
if current_rcconf_value.strip().upper() != self.rcconf_value:
self.changed = True
if self.module.check_mode:
self.module.exit_json(changed=True, msg="changing service enablement")
rc, change_stdout, change_stderr = self.execute_command("%s %s=\"%s\"" % (self.sysrc_cmd, self.rcconf_key, self.rcconf_value))
if rc != 0:
self.module.fail_json(msg="unable to set rcvar using sysrc", stdout=change_stdout, stderr=change_stderr)
# sysrc does not exit with code 1 on permission error => validate successful change using service(8)
rc, check_stdout, check_stderr = self.execute_command("%s %s %s" % (self.svc_cmd, self.name, "enabled"))
if self.enable != (rc == 0): # rc = 0 indicates enabled service, rc = 1 indicates disabled service
self.module.fail_json(msg="unable to set rcvar: sysrc did not change value", stdout=change_stdout, stderr=change_stderr)
else:
self.changed = False
else: # Legacy (FreeBSD < 9.2)
try:
return self.service_enable_rcconf()
except Exception:
self.module.fail_json(msg='unable to set rcvar')
def service_control(self):
if self.action == "start":
self.action = "onestart"
if self.action == "stop":
self.action = "onestop"
if self.action == "reload":
self.action = "onereload"
ret = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.arguments, self.name, self.action))
if self.sleep:
time.sleep(self.sleep)
return ret
class DragonFlyBsdService(FreeBsdService):
"""
This is the DragonFly BSD Service manipulation class - it uses the /etc/rc.conf
file for controlling services started at boot and the 'service' binary to
check status and perform direct service manipulation.
"""
platform = 'DragonFly'
distribution = None
def service_enable(self):
if self.enable:
self.rcconf_value = "YES"
else:
self.rcconf_value = "NO"
rcfiles = ['/etc/rc.conf'] # Overkill?
for rcfile in rcfiles:
if os.path.isfile(rcfile):
self.rcconf_file = rcfile
self.rcconf_key = "%s" % self.name.replace("-", "_")
return self.service_enable_rcconf()
class OpenBsdService(Service):
"""
This is the OpenBSD Service manipulation class - it uses rcctl(8) or
/etc/rc.d scripts for service control. Enabling a service is
only supported if rcctl is present.
"""
platform = 'OpenBSD'
distribution = None
def get_service_tools(self):
self.enable_cmd = self.module.get_bin_path('rcctl')
if self.enable_cmd:
self.svc_cmd = self.enable_cmd
else:
rcdir = '/etc/rc.d'
rc_script = "%s/%s" % (rcdir, self.name)
if os.path.isfile(rc_script):
self.svc_cmd = rc_script
if not self.svc_cmd:
self.module.fail_json(msg='unable to find svc_cmd')
def get_service_status(self):
if self.enable_cmd:
rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name))
else:
rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
if stderr:
self.module.fail_json(msg=stderr)
if rc == 1:
self.running = False
elif rc == 0:
self.running = True
def service_control(self):
if self.enable_cmd:
return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name), daemonize=True)
else:
return self.execute_command("%s -f %s" % (self.svc_cmd, self.action))
def service_enable(self):
if not self.enable_cmd:
return super(OpenBsdService, self).service_enable()
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status'))
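# rcctl get <service> status exits 0 when the service is enabled and non-zero when it is disabled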
status_action = None
if self.enable:
if rc != 0:
status_action = "on"
elif self.enable is not None:
# should be explicit False at this point
if rc != 1:
status_action = "off"
if status_action is not None:
self.changed = True
if not self.module.check_mode:
rc, stdout, stderr = self.execute_command("%s set %s status %s" % (self.enable_cmd, self.name, status_action))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg="rcctl failed to modify service status")
class NetBsdService(Service):
"""
This is the NetBSD Service manipulation class - it uses the /etc/rc.conf
file for controlling services started at boot, check status and perform
direct service manipulation. Init scripts in /etc/rc.d are used for
controlling services (start/stop) as well as for controlling the current
state.
"""
platform = 'NetBSD'
distribution = None
def get_service_tools(self):
initpaths = ['/etc/rc.d'] # better: $rc_directories - how to get in here? Run: sh -c '. /etc/rc.conf ; echo $rc_directories'
for initdir in initpaths:
initscript = "%s/%s" % (initdir, self.name)
if os.path.isfile(initscript):
self.svc_initscript = initscript
if not self.svc_initscript:
self.module.fail_json(msg='unable to find rc.d script')
def service_enable(self):
if self.enable:
self.rcconf_value = "YES"
else:
self.rcconf_value = "NO"
rcfiles = ['/etc/rc.conf'] # Overkill?
for rcfile in rcfiles:
if os.path.isfile(rcfile):
self.rcconf_file = rcfile
self.rcconf_key = "%s" % self.name.replace("-", "_")
return self.service_enable_rcconf()
def get_service_status(self):
self.svc_cmd = "%s" % self.svc_initscript
rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus'))
if rc == 1:
self.running = False
elif rc == 0:
self.running = True
def service_control(self):
if self.action == "start":
self.action = "onestart"
if self.action == "stop":
self.action = "onestop"
self.svc_cmd = "%s" % self.svc_initscript
return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True)
class SunOSService(Service):
"""
This is the SunOS Service manipulation class - it uses the svcadm
command for controlling services, and svcs command for checking status.
It also tries to be smart about taking the service out of maintenance
state if necessary.
"""
platform = 'SunOS'
distribution = None
def get_service_tools(self):
self.svcs_cmd = self.module.get_bin_path('svcs', True)
if not self.svcs_cmd:
self.module.fail_json(msg='unable to find svcs binary')
self.svcadm_cmd = self.module.get_bin_path('svcadm', True)
if not self.svcadm_cmd:
self.module.fail_json(msg='unable to find svcadm binary')
if self.svcadm_supports_sync():
self.svcadm_sync = '-s'
else:
self.svcadm_sync = ''
def svcadm_supports_sync(self):
# Support for synchronous restart/refresh is only supported on
# Oracle Solaris >= 11.2
with open('/etc/release', 'r') as release_fh:
for line in release_fh:
m = re.match(r'\s+Oracle Solaris (\d+)\.(\d+).*', line.rstrip())
# compare numerically so that, e.g., 11.10 is treated as newer than 11.2
if m and tuple(int(v) for v in m.groups()) >= (11, 2):
return True
def get_service_status(self):
status = self.get_sunos_svcs_status()
# Only 'online' is considered properly running. Everything else is off
# or has some sort of problem.
if status == 'online':
self.running = True
else:
self.running = False
def get_sunos_svcs_status(self):
rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
lines = stdout.rstrip("\n").split("\n")
status = lines[-1].split(" ")[0]
# status is one of: online, offline, degraded, disabled, maintenance, uninitialized
# see man svcs(1)
return status
def service_enable(self):
# Get current service enablement status
rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
enabled = False
temporary = False
# look for enabled line, which could be one of:
# enabled true (temporary)
# enabled false (temporary)
# enabled true
# enabled false
for line in stdout.split("\n"):
if line.startswith("enabled"):
if "true" in line:
enabled = True
if "temporary" in line:
temporary = True
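# the persistent (boot-time) state is the current state flipped by any temporary override:
# enabled+temporary means disabled at boot, and vice versa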
startup_enabled = (enabled and not temporary) or (not enabled and temporary)
if self.enable and startup_enabled:
return
elif (not self.enable) and (not startup_enabled):
return
if not self.module.check_mode:
# Mark service as started or stopped (this will have the side effect of
# actually stopping or starting the service)
if self.enable:
subcmd = "enable -rs"
else:
subcmd = "disable -s"
rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
self.changed = True
def service_control(self):
status = self.get_sunos_svcs_status()
# if starting or reloading, clear maintenance states
if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
if rc != 0:
return rc, stdout, stderr
status = self.get_sunos_svcs_status()
if status in ['maintenance', 'degraded']:
self.module.fail_json(msg="Failed to bring service out of %s status." % status)
if self.action == 'start':
subcmd = "enable -rst"
elif self.action == 'stop':
subcmd = "disable -st"
elif self.action == 'reload':
subcmd = "refresh %s" % (self.svcadm_sync)
elif self.action == 'restart' and status == 'online':
subcmd = "restart %s" % (self.svcadm_sync)
elif self.action == 'restart' and status != 'online':
subcmd = "enable -rst"
return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
class AIX(Service):
"""
This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc
and refresh for service control. Enabling a service is currently not supported.
Would require to add an entry in the /etc/inittab file (mkitab, chitab and rmitab
commands)
"""
platform = 'AIX'
distribution = None
def get_service_tools(self):
self.lssrc_cmd = self.module.get_bin_path('lssrc', True)
if not self.lssrc_cmd:
self.module.fail_json(msg='unable to find lssrc binary')
self.startsrc_cmd = self.module.get_bin_path('startsrc', True)
if not self.startsrc_cmd:
self.module.fail_json(msg='unable to find startsrc binary')
self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True)
if not self.stopsrc_cmd:
self.module.fail_json(msg='unable to find stopsrc binary')
self.refresh_cmd = self.module.get_bin_path('refresh', True)
if not self.refresh_cmd:
self.module.fail_json(msg='unable to find refresh binary')
def get_service_status(self):
status = self.get_aix_src_status()
# Only 'active' is considered properly running. Everything else is off
# or has some sort of problem.
if status == 'active':
self.running = True
else:
self.running = False
def get_aix_src_status(self):
# Check subsystem status
rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name))
if rc == 1:
# If check for subsystem is not ok, check if service name is a
# group subsystem
rc, stdout, stderr = self.execute_command("%s -g %s" % (self.lssrc_cmd, self.name))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
else:
# Check all subsystem status, if one subsystem is not active
# the group is considered not active.
lines = stdout.splitlines()
for state in lines[1:]:
if state.split()[-1].strip() != "active":
status = state.split()[-1].strip()
break
else:
status = "active"
# status is one of: active, inoperative
return status
else:
lines = stdout.rstrip("\n").split("\n")
status = lines[-1].split(" ")[-1]
# status is one of: active, inoperative
return status
def service_control(self):
# Check if service name is a subsystem of a group subsystem
rc, stdout, stderr = self.execute_command("%s -a" % (self.lssrc_cmd))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
else:
lines = stdout.splitlines()
subsystems = []
groups = []
for line in lines[1:]:
subsystem = line.split()[0].strip()
group = line.split()[1].strip()
subsystems.append(subsystem)
if group:
groups.append(group)
# Define if service name parameter:
# -s subsystem or -g group subsystem
if self.name in subsystems:
srccmd_parameter = "-s"
elif self.name in groups:
srccmd_parameter = "-g"
else:
self.module.fail_json(msg='subsystem or group subsystem %s not found by lssrc' % self.name)
if self.action == 'start':
srccmd = self.startsrc_cmd
elif self.action == 'stop':
srccmd = self.stopsrc_cmd
elif self.action == 'reload':
srccmd = self.refresh_cmd
elif self.action == 'restart':
self.execute_command("%s %s %s" % (self.stopsrc_cmd, srccmd_parameter, self.name))
if self.sleep:
time.sleep(self.sleep)
srccmd = self.startsrc_cmd
if self.arguments and self.action in ('start', 'restart'):
return self.execute_command("%s -a \"%s\" %s %s" % (srccmd, self.arguments, srccmd_parameter, self.name))
else:
return self.execute_command("%s %s %s" % (srccmd, srccmd_parameter, self.name))
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['started', 'stopped', 'reloaded', 'restarted']),
sleep=dict(type='int'),
pattern=dict(type='str'),
enabled=dict(type='bool'),
runlevel=dict(type='str', default='default'),
arguments=dict(type='str', default='', aliases=['args']),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled']],
)
service = Service(module)
module.debug('Service instantiated - platform %s' % service.platform)
if service.distribution:
module.debug('Service instantiated - distribution %s' % service.distribution)
rc = 0
out = ''
err = ''
result = {}
result['name'] = service.name
# Find service management tools
service.get_service_tools()
# Enable/disable service startup at boot if requested
if service.module.params['enabled'] is not None:
# FIXME: ideally this should detect if we need to toggle the enablement state, though
# it's unlikely the changed handler would need to fire in this case so it's a minor thing.
service.service_enable()
result['enabled'] = service.enable
if module.params['state'] is None:
# Not changing the running state, so bail out now.
result['changed'] = service.changed
module.exit_json(**result)
result['state'] = service.state
# Collect service status
if service.pattern:
service.check_ps()
else:
service.get_service_status()
# Calculate if request will change service state
service.check_service_changed()
# Modify service state if necessary
(rc, out, err) = service.modify_service_state()
if rc != 0:
if err and "Job is already running" in err:
# upstart got confused, one such possibility is MySQL on Ubuntu 12.04
# where status may report it has no start/stop links and we could
# not get accurate status
pass
else:
if err:
module.fail_json(msg=err)
else:
module.fail_json(msg=out)
result['changed'] = service.changed | service.svc_change
if service.module.params['enabled'] is not None:
result['enabled'] = service.module.params['enabled']
if not service.module.params['state']:
status = service.get_service_status()
if status is None:
result['state'] = 'absent'
elif status is False:
result['state'] = 'stopped'
else:
result['state'] = 'started'
else:
# as we may have just bounced the service the service command may not
# report accurate state at this moment so just show what we ran
if service.module.params['state'] in ['reloaded', 'restarted', 'started']:
result['state'] = 'started'
else:
result['state'] = 'stopped'
module.exit_json(**result)
if __name__ == '__main__':
main()
| 62,314 | Python | .py | 1,347 | 34.535264 | 158 | 0.574187 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,180 | set_fact.py | ansible_ansible/lib/ansible/modules/set_fact.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: set_fact
short_description: Set host variable(s) and fact(s).
version_added: "1.2"
description:
- This action allows setting variables associated to the current host.
- These variables will be available to subsequent plays during an ansible-playbook run via the host they were set on.
- Set O(cacheable) to V(true) to save variables across executions using a fact cache.
Variables will keep the set_fact precedence for the current run, but will use 'cached fact' precedence for subsequent ones.
- Per the standard Ansible variable precedence rules, other types of variables have a higher priority, so this value may be overridden.
options:
key_value:
description:
- "The M(ansible.builtin.set_fact) module takes C(key=value) pairs or C(key: value) (YAML notation) as variables to set in the playbook scope.
The 'key' is the resulting variable name and the value is, of course, the value of said variable."
- You can create multiple variables at once, by supplying multiple pairs, but do NOT mix notations.
required: true
cacheable:
description:
- This boolean converts the variable into an actual 'fact' which will also be added to the fact cache.
It does not enable fact caching across runs, it just means it will work with it if already enabled.
- Normally this module creates 'host level variables', which have much higher precedence; this option changes the nature and precedence
(by 7 steps) of the variable created.
U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable)
- "This actually creates 2 copies of the variable, a normal 'set_fact' host variable with high precedence and
a lower 'ansible_fact' one that is available for persistence via the facts cache plugin.
This creates a possibly confusing interaction with C(meta: clear_facts) as it will remove the 'ansible_fact' but not the host variable."
type: bool
default: no
version_added: "2.4"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
attributes:
action:
details: While the action plugin does do some of the work, it relies on the core engine to actually create the variables; that part cannot be overridden
support: partial
bypass_host_loop:
support: none
bypass_task_loop:
support: none
check_mode:
support: full
core:
details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
support: partial
delegation:
details:
- while variable assignment can be delegated to a different host the execution context is always the current inventory_hostname
- connection variables, if set at all, would reflect the host it would target, even if we are not connecting at all in this case
support: partial
diff_mode:
support: none
notes:
- Because of the nature of tasks, set_fact will produce 'static' values for a variable.
Unlike normal 'lazy' variables, the value gets evaluated and templated on assignment.
- Some boolean values (yes, no, true, false) will always be converted to boolean type,
unless C(DEFAULT_JINJA2_NATIVE) is enabled. This is done so that C(var=value) can create booleans,
as otherwise it could only create strings, but it also prevents using those literal values to create YAML strings.
Enabling the setting restricts C(key=value) to strings, while still allowing you to specify either strings or booleans in YAML.
- "To create lists/arrays or dictionary/hashes use YAML notation C(var: [val1, val2])."
- Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name.
seealso:
- module: ansible.builtin.include_vars
- ref: ansible_variable_precedence
description: More information related to variable precedence and which type of variable wins over others.
author:
- Dag Wieers (@dagwieers)
"""
EXAMPLES = r"""
- name: Setting host facts using key=value pairs, this format can only create strings or booleans
ansible.builtin.set_fact: one_fact="something" other_fact="{{ local_var }}"
- name: Setting host facts using complex arguments
ansible.builtin.set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
- name: Setting facts so that they will be persisted in the fact cache
ansible.builtin.set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
cacheable: yes
- name: Creating list and dictionary variables
ansible.builtin.set_fact:
one_dict:
something: here
other: there
one_list:
- a
- b
- c
# As of Ansible 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
# to proper boolean values when using the key=value syntax, however it is still
# recommended that booleans be set using the complex argument style:
- name: Setting booleans using complex argument style
ansible.builtin.set_fact:
one_fact: yes
other_fact: no
- name: Creating list and dictionary variables using 'shorthand' YAML
ansible.builtin.set_fact:
two_dict: {'something': here2, 'other': somewhere}
two_list: [1,2,3]
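# A hedged sketch of the 'static' values note in the documentation above: the
# expression is templated once, at assignment time, so later changes to
# local_var do not affect frozen_fact (variable names are illustrative).
- name: Freeze the current value of local_var
  ansible.builtin.set_fact:
    frozen_fact: "{{ local_var }}"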
"""
| 5,721
|
Python
|
.py
| 110
| 47.127273
| 159
| 0.729561
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,181
|
import_role.py
|
ansible_ansible/lib/ansible/modules/import_role.py
|
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
author: Ansible Core Team (@ansible)
module: import_role
short_description: Import a role into a play
description:
- Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in
between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(ansible.builtin.include_role) instead.
- Does not work in handlers.
version_added: '2.4'
options:
name:
description:
- The name of the role to be executed.
type: str
required: true
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
type: str
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
type: str
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
type: str
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: yes
handlers_from:
description:
- File to load from a role's C(handlers/) directory.
type: str
default: main
version_added: '2.8'
rolespec_validate:
description:
- Perform role argument spec validation if an argument spec is defined.
type: bool
default: yes
version_added: '2.11'
public:
description:
- This option dictates whether the role's C(vars) and C(defaults) are exposed to the play.
      - Variables are exposed to the play at playbook parsing time and are available to earlier roles and tasks as well, unlike C(include_role).
- The default depends on the configuration option :ref:`default_private_role_vars`.
type: bool
default: yes
version_added: '2.17'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
- action_core
- action_core.import
attributes:
check_mode:
support: full
diff_mode:
support: none
notes:
- Handlers are made available to the whole play.
- Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed to the play at playbook parsing time.
Due to this, these variables will be accessible to roles and tasks executed before the location of the
M(ansible.builtin.import_role) task.
  - Unlike M(ansible.builtin.include_role), variable exposure is not configurable; variables will always be exposed.
seealso:
- module: ansible.builtin.import_playbook
- module: ansible.builtin.import_tasks
- module: ansible.builtin.include_role
- module: ansible.builtin.include_tasks
- ref: playbooks_reuse
description: More information related to including and importing playbooks, roles and tasks.
"""
EXAMPLES = r"""
- hosts: all
tasks:
- ansible.builtin.import_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
ansible.builtin.import_role:
name: myrole
tasks_from: other
- name: Pass variables to role
ansible.builtin.import_role:
name: myrole
vars:
rolevar1: value from task
- name: Apply condition to each task in role
ansible.builtin.import_role:
name: myrole
when: not idontwanttorun
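# A hedged sketch combining options documented above: skip role argument spec
# validation and keep the role's vars and defaults private to this import
# (the role name is illustrative).
- name: Import role privately and without argument spec validation
  ansible.builtin.import_role:
    name: myrole
    rolespec_validate: false
    public: false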
"""
RETURN = r"""
# This module does not return anything except tasks to execute.
"""
| 3,707
|
Python
|
.py
| 109
| 29.724771
| 140
| 0.716435
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,182
|
getent.py
|
ansible_ansible/lib/ansible/modules/getent.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: getent
short_description: A wrapper to the unix getent utility
description:
- Runs getent against one of its various databases and returns information into
the host's facts, in a C(getent_<database>) prefixed variable.
version_added: "1.8"
options:
database:
description:
- The name of a getent database supported by the target system (passwd, group,
hosts, etc).
type: str
required: True
key:
description:
- Key from which to return values from the specified database, otherwise the
full contents are returned.
type: str
service:
description:
      - Override all databases with the specified service.
- The underlying system must support the service flag which is not always available.
type: str
version_added: "2.9"
split:
description:
- Character used to split the database values into lists/arrays such as V(:) or V(\\t),
otherwise it will try to pick one depending on the database.
type: str
fail_key:
description:
      - If V(true), the task will fail if a supplied key is missing.
type: bool
default: 'yes'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
attributes:
check_mode:
support: full
diff_mode:
support: none
facts:
support: full
platform:
platforms: posix
notes:
- Not all databases support enumeration, check system documentation for details.
author:
- Brian Coca (@bcoca)
"""
EXAMPLES = """
- name: Get root user info
ansible.builtin.getent:
database: passwd
key: root
- ansible.builtin.debug:
var: ansible_facts.getent_passwd
- name: Get all groups
ansible.builtin.getent:
database: group
split: ':'
- ansible.builtin.debug:
var: ansible_facts.getent_group
- name: Get all hosts, split by tab
ansible.builtin.getent:
database: hosts
- ansible.builtin.debug:
var: ansible_facts.getent_hosts
- name: Get http service info, no error if missing
ansible.builtin.getent:
database: services
key: http
fail_key: False
- ansible.builtin.debug:
var: ansible_facts.getent_services
- name: Get user password hash (requires sudo/root)
ansible.builtin.getent:
database: shadow
key: www-data
split: ':'
- ansible.builtin.debug:
var: ansible_facts.getent_shadow
"""
RETURN = """
ansible_facts:
description: Facts to add to ansible_facts.
returned: always
type: dict
contains:
getent_<database>:
description:
        - A list of results or a single result as a list of the fields the database provides.
        - The list elements depend on the database queried; see the getent man page for the structure.
        - Starting with 2.11 it returns multiple duplicate entries; previously it only returned the last one.
returned: always
type: list
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
database=dict(type='str', required=True),
key=dict(type='str', no_log=False),
service=dict(type='str'),
split=dict(type='str'),
fail_key=dict(type='bool', default=True),
),
supports_check_mode=True,
)
colon = ['passwd', 'shadow', 'group', 'gshadow']
database = module.params['database']
key = module.params.get('key')
split = module.params.get('split')
service = module.params.get('service')
fail_key = module.params.get('fail_key')
getent_bin = module.get_bin_path('getent', True)
if key is not None:
cmd = [getent_bin, database, key]
else:
cmd = [getent_bin, database]
if service is not None:
cmd.extend(['-s', service])
if split is None and database in colon:
split = ':'
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
msg = "Unexpected failure!"
dbtree = 'getent_%s' % database
results = {dbtree: {}}
if rc == 0:
seen = {}
for line in out.splitlines():
record = line.split(split)
if record[0] in seen:
# more than one result for same key, ensure we store in a list
if seen[record[0]] == 1:
results[dbtree][record[0]] = [results[dbtree][record[0]]]
results[dbtree][record[0]].append(record[1:])
seen[record[0]] += 1
else:
# new key/value, just assign
results[dbtree][record[0]] = record[1:]
seen[record[0]] = 1
module.exit_json(ansible_facts=results)
elif rc == 1:
msg = "Missing arguments, or database unknown."
elif rc == 2:
        msg = "One or more supplied keys could not be found in the database."
if not fail_key:
results[dbtree][key] = None
module.exit_json(ansible_facts=results, msg=msg)
elif rc == 3:
msg = "Enumeration not supported on this database."
module.fail_json(msg=msg)
if __name__ == '__main__':
main()
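# A minimal standalone sketch (not part of the module) of the duplicate-key
# grouping performed in main() above: the first record for a key is stored as
# a flat list of fields, and a second occurrence of the same key converts the
# stored value into a list of lists.
def _group_records(lines, split):
    results, seen = {}, {}
    for line in lines:
        record = line.split(split)
        if record[0] in seen:
            if seen[record[0]] == 1:
                # promote the single record to a list of records
                results[record[0]] = [results[record[0]]]
            results[record[0]].append(record[1:])
            seen[record[0]] += 1
        else:
            results[record[0]] = record[1:]
            seen[record[0]] = 1
    return results
# For example, _group_records(['http 80/tcp www', 'http 8008/tcp'], ' ')
# returns {'http': [['80/tcp', 'www'], ['8008/tcp']]}.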
| 5,644
|
Python
|
.py
| 167
| 27.167665
| 110
| 0.640654
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,183
|
known_hosts.py
|
ansible_ansible/lib/ansible/modules/known_hosts.py
|
# Copyright: (c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
- The M(ansible.builtin.known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
- Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
    This is useful if you intend to use the M(ansible.builtin.git) module over ssh, for example.
- If you have a very large number of host keys to manage, you will find the M(ansible.builtin.template) module more useful.
version_added: "1.9"
options:
name:
aliases: [ 'host' ]
description:
- The host to add or remove (must match a host specified in key). It will be converted to lowercase so that C(ssh-keygen) can find it.
- Must match with <hostname> or <ip> present in key attribute.
- For custom SSH port, O(name) needs to specify port as well. See example section.
type: str
required: true
key:
description:
- The SSH public host key, as a string.
- Required if O(state=present), optional when O(state=absent), in which case all keys for the host are removed.
- The key must be in the right format for SSH (see sshd(8), section "SSH_KNOWN_HOSTS FILE FORMAT").
- Specifically, the key should not match the format that is found in an SSH pubkey file, but should rather have the hostname prepended to a
line that includes the pubkey, the same way that it would appear in the known_hosts file. The value prepended to the line must also match
the value of the name parameter.
- Should be of format C(<hostname[,IP]> ssh-rsa <pubkey>).
- For custom SSH port, O(key) needs to specify port as well. See example section.
type: str
path:
description:
- The known_hosts file to edit.
- The known_hosts file will be created if needed. The rest of the path must exist prior to running the module.
default: "~/.ssh/known_hosts"
type: path
hash_host:
description:
- Hash the hostname in the known_hosts file.
type: bool
default: "no"
version_added: "2.3"
state:
description:
- V(present) to add host keys.
- V(absent) to remove host keys.
choices: [ "absent", "present" ]
default: "present"
type: str
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix
extends_documentation_fragment:
- action_common_attributes
author:
- Matthew Vernon (@mcv21)
"""
EXAMPLES = r"""
- name: Tell the host about our servers it might want to ssh to
ansible.builtin.known_hosts:
path: /etc/ssh/ssh_known_hosts
name: foo.com.invalid
key: "{{ lookup('ansible.builtin.file', 'pubkeys/foo.com.invalid') }}"
- name: Another way to call known_hosts
ansible.builtin.known_hosts:
name: host1.example.com # or 10.9.8.77
key: host1.example.com,10.9.8.77 ssh-rsa ASDeararAIUHI324324 # some key gibberish
path: /etc/ssh/ssh_known_hosts
state: present
- name: Add host with custom SSH port
ansible.builtin.known_hosts:
name: '[host1.example.com]:2222'
key: '[host1.example.com]:2222 ssh-rsa ASDeararAIUHI324324' # some key gibberish
path: /etc/ssh/ssh_known_hosts
state: present
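# A hedged sketch: when O(state=absent) the key is optional, and omitting it
# removes all keys for the host (see the 'key' option documentation above).
- name: Remove all keys for a host
  ansible.builtin.known_hosts:
    name: host1.example.com
    path: /etc/ssh/ssh_known_hosts
    state: absent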
"""
# Makes sure public host keys are present or absent in the given known_hosts
# file.
#
# Arguments
# =========
# name = hostname whose key should be added (alias: host)
# key = line(s) to add to known_hosts file
# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
# hash_host = yes|no (default: no) hash the hostname in the known_hosts file
# state = absent|present (default: present)
import base64
import copy
import errno
import hashlib
import hmac
import os
import os.path
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
def enforce_state(module, params):
"""
Add or remove key.
"""
results = dict(changed=False)
host = params["name"].lower()
key = params.get("key", None)
path = params.get("path")
hash_host = params.get("hash_host")
state = params.get("state")
# Find the ssh-keygen binary
sshkeygen = module.get_bin_path("ssh-keygen", True)
if not key and state != "absent":
module.fail_json(msg="No key specified when adding a host")
if key and hash_host:
key = hash_host_key(host, key)
# Trailing newline in files gets lost, so re-add if necessary
if key and not key.endswith('\n'):
key += '\n'
sanity_check(module, host, key, sshkeygen)
found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen)
results['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
# check if we are trying to remove a non matching key,
# in that case return with no change to the host
if state == 'absent' and not found_line and key:
return results
# We will change state if found==True & state!="present"
# or found==False & state=="present"
# i.e found XOR (state=="present")
# Alternatively, if replace is true (i.e. key present, and we must change
# it)
if module.check_mode:
results['changed'] = replace_or_add or (state == "present") != found
module.exit_json(**results)
# Now do the work.
# Only remove whole host if found and no key provided
if found and not key and state == "absent":
module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
results['changed'] = True
# Next, add a new (or replacing) entry
if replace_or_add or found != (state == "present"):
try:
inf = open(path, "r")
except IOError as e:
if e.errno == errno.ENOENT:
inf = None
else:
module.fail_json(msg="Failed to read %s: %s" % (path, str(e)))
try:
with tempfile.NamedTemporaryFile(mode='w+', dir=os.path.dirname(path), delete=False) as outf:
if inf is not None:
for line_number, line in enumerate(inf):
if found_line == (line_number + 1) and (replace_or_add or state == 'absent'):
continue # skip this line to replace its key
outf.write(line)
inf.close()
if state == 'present':
outf.write(key)
except (IOError, OSError) as e:
module.fail_json(msg="Failed to write to file %s: %s" % (path, to_native(e)))
else:
module.atomic_move(outf.name, path)
results['changed'] = True
return results
def sanity_check(module, host, key, sshkeygen):
"""Check supplied key is sensible
host and key are parameters provided by the user; If the host
provided is inconsistent with the key supplied, then this function
quits, providing an error to the user.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
"""
# If no key supplied, we're doing a removal, and have nothing to check here.
if not key:
return
# Rather than parsing the key ourselves, get ssh-keygen to do it
# (this is essential for hashed keys, but otherwise useful, as the
# key question is whether ssh-keygen thinks the key matches the host).
# The approach is to write the key to a temporary file,
# and then attempt to look up the specified host in that file.
if re.search(r'\S+(\s+)?,(\s+)?', host):
module.fail_json(msg="Comma separated list of names is not supported. "
"Please pass a single name to lookup in the known_hosts file.")
with tempfile.NamedTemporaryFile(mode='w+') as outf:
try:
outf.write(key)
outf.flush()
except IOError as e:
module.fail_json(msg="Failed to write to temporary file %s: %s" %
(outf.name, to_native(e)))
sshkeygen_command = [sshkeygen, '-F', host, '-f', outf.name]
rc, stdout, stderr = module.run_command(sshkeygen_command)
if stdout == '': # host not found
module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
def search_for_host_key(module, host, key, path, sshkeygen):
"""search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line)
Looks up host and keytype in the known_hosts file path; if it's there, looks to see
if one of those entries matches key. Returns:
found (Boolean): is host found in path?
    replace_or_add (Boolean): is the key in path different from that supplied by the user?
found_line (int or None): the line where a key of the same type was found
if found=False, then replace is always False.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
"""
if os.path.exists(path) is False:
return False, False, None
sshkeygen_command = [sshkeygen, '-F', host, '-f', path]
# openssh >=6.4 has changed ssh-keygen behaviour such that it returns
# 1 if no host is found, whereas previously it returned 0
rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=False)
if stdout == '' and stderr == '' and (rc == 0 or rc == 1):
return False, False, None # host not found, no other errors
if rc != 0: # something went wrong
module.fail_json(msg="ssh-keygen failed (rc=%d, stdout='%s',stderr='%s')" % (rc, stdout, stderr))
# If user supplied no key, we don't want to try and replace anything with it
if not key:
return True, False, None
lines = stdout.split('\n')
new_key = normalize_known_hosts_key(key)
for lnum, l in enumerate(lines):
if l == '':
continue
elif l[0] == '#': # info output from ssh-keygen; contains the line number where key was found
try:
# This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
# It always outputs the non-localized comment before the found key
found_line = int(re.search(r'found: line (\d+)', l).group(1))
            except (IndexError, AttributeError):  # AttributeError when re.search() finds no match
module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
else:
found_key = normalize_known_hosts_key(l)
if 'options' in found_key and found_key['options'][:15] == '@cert-authority':
if new_key == found_key: # found a match
return True, False, found_line # found exactly the same key, don't replace
elif 'options' in found_key and found_key['options'][:7] == '@revoke':
if new_key == found_key: # found a match
return True, False, found_line # found exactly the same key, don't replace
else:
if new_key['host'][:3] == '|1|' and found_key['host'][:3] == '|1|': # do not change host hash if already hashed
new_key['host'] = found_key['host']
if new_key == found_key: # found a match
return True, False, found_line # found exactly the same key, don't replace
elif new_key['type'] == found_key['type']: # found a different key for the same key type
return True, True, found_line
# No match found, return found and replace, but no line
return True, True, None
def hash_host_key(host, key):
hmac_key = os.urandom(20)
hashed_host = hmac.new(hmac_key, to_bytes(host), hashlib.sha1).digest()
parts = key.strip().split()
# @ indicates the optional marker field used for @cert-authority or @revoked
i = 1 if parts[0][0] == '@' else 0
parts[i] = '|1|%s|%s' % (to_native(base64.b64encode(hmac_key)), to_native(base64.b64encode(hashed_host)))
return ' '.join(parts)
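# Illustrative (hedged) output shape: the salt and digest vary per run because
# hmac_key is random, but the result always has the HashKnownHosts form used
# by OpenSSH, for example:
#   '|1|<base64 salt>|<base64 HMAC-SHA1 of host> ssh-ed25519 AAAA...'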
def normalize_known_hosts_key(key):
"""
Transform a key, either taken from a known_host file or provided by the
user, into a normalized form.
The host part (which might include multiple hostnames or be hashed) gets
replaced by the provided host. Also, any spurious information gets removed
from the end (like the username@host tag usually present in hostkeys, but
absent in known_hosts files)
"""
key = key.strip() # trim trailing newline
k = key.split()
d = dict()
# The optional "marker" field, used for @cert-authority or @revoked
if k[0][0] == '@':
d['options'] = k[0]
d['host'] = k[1]
d['type'] = k[2]
d['key'] = k[3]
else:
d['host'] = k[0]
d['type'] = k[1]
d['key'] = k[2]
return d
def compute_diff(path, found_line, replace_or_add, state, key):
diff = {
'before_header': path,
'after_header': path,
'before': '',
'after': '',
}
try:
inf = open(path, "r")
except IOError as e:
if e.errno == errno.ENOENT:
diff['before_header'] = '/dev/null'
else:
diff['before'] = inf.read()
inf.close()
lines = diff['before'].splitlines(1)
if (replace_or_add or state == 'absent') and found_line is not None and 1 <= found_line <= len(lines):
del lines[found_line - 1]
if state == 'present' and (replace_or_add or found_line is None):
lines.append(key)
diff['after'] = ''.join(lines)
return diff
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['host']),
key=dict(required=False, type='str', no_log=False),
path=dict(default="~/.ssh/known_hosts", type='path'),
hash_host=dict(required=False, type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
),
supports_check_mode=True
)
# TODO: deprecate returning everything that was passed in
results = copy.copy(module.params)
results.update(enforce_state(module, module.params))
module.exit_json(**results)
if __name__ == '__main__':
main()
| 14,439
|
Python
|
.py
| 319
| 38.269592
| 145
| 0.640074
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,184
|
yum_repository.py
|
ansible_ansible/lib/ansible/modules/yum_repository.py
|
# encoding: utf-8
# (c) 2015-2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: yum_repository
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add or remove YUM repositories
description:
- Add or remove YUM repositories in RPM-based Linux distributions.
- If you wish to update an existing repository definition use M(community.general.ini_file) instead.
options:
async:
description:
- If set to V(true) Yum will download packages and metadata from this
repo in parallel, if possible.
- In ansible-core 2.11, 2.12, and 2.13 the default value is V(true).
- This option has been removed in RHEL 8. If you're using one of the
versions listed above, you can set this option to V(null) to avoid passing an
unknown configuration option.
- This parameter is deprecated as it has been removed on systems supported by ansible-core
and will be removed in ansible-core 2.22.
type: bool
bandwidth:
description:
- Maximum available network bandwidth in bytes/second. Used with the
O(throttle) option.
- If O(throttle) is a percentage and bandwidth is V(0) then bandwidth
throttling will be disabled. If O(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is V(0) (no bandwidth
throttling).
type: str
baseurl:
description:
- URL to the directory where the yum repository's 'repodata' directory
lives.
- It can also be a list of multiple URLs.
      - Either this, the O(metalink), or the O(mirrorlist) parameter is required if O(state) is set to
        V(present).
type: list
elements: str
cost:
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
type: str
countme:
description:
- Whether a special flag should be added to a randomly chosen metalink/mirrorlist query each week.
This allows the repository owner to estimate the number of systems consuming it.
default: ~
type: bool
version_added: '2.18'
deltarpm_metadata_percentage:
description:
- When the relative size of deltarpm metadata vs pkgs is larger than
this, deltarpm metadata is not downloaded from the repo. Note that you
can give values over V(100), so V(200) means that the metadata is
required to be half the size of the packages. Use V(0) to turn off
this check, and always download metadata.
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: str
deltarpm_percentage:
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use V(0) to turn off delta rpm processing. Local repositories
(with file://O(baseurl)) have delta rpms turned off by default.
type: str
description:
description:
- A human-readable string describing the repository. This option corresponds to the C(name) property in the repo file.
- This parameter is only required if O(state=present).
type: str
enabled:
description:
      - This tells yum whether or not to use this repository.
- Yum default value is V(true).
type: bool
enablegroups:
description:
- Determines whether yum will allow the use of package groups for this
repository.
- Yum default value is V(true).
type: bool
exclude:
description:
- List of packages to exclude from updates or installs. This should be a
space separated list. Shell globs using wildcards (for example V(*) and V(?))
are allowed.
- The list can also be a regular YAML array.
- O(excludepkgs) alias was added in ansible-core 2.18.
type: list
elements: str
aliases:
- excludepkgs
failovermethod:
choices: [roundrobin, priority]
description:
- V(roundrobin) randomly selects a URL out of the list of URLs to start
with and proceeds through each of them as it encounters a failure
contacting the host.
- V(priority) starts from the first O(baseurl) listed and reads through
them sequentially.
type: str
file:
description:
- File name without the C(.repo) extension to save the repo in. Defaults
to the value of O(name).
type: str
gpgcakey:
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: str
gpgcheck:
description:
- Tells yum whether or not it should perform a GPG signature check on
packages.
- No default setting. If the value is not set, the system setting from
C(/etc/yum.conf) or system default of V(false) will be used.
type: bool
gpgkey:
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
- It can also be a list of multiple URLs.
type: list
elements: str
module_hotfixes:
description:
- Disable module RPM filtering and make all RPMs from the repository
available. The default is V(null).
version_added: '2.11'
type: bool
http_caching:
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
downloads that Yum does.
- V(all) means that all HTTP downloads should be cached.
- V(packages) means that only RPM package downloads should be cached (but
not repository metadata downloads).
- V(none) means that no HTTP downloads should be cached.
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
choices: [all, packages, none]
type: str
include:
description:
      - Include external configuration file. Both local paths and URLs are
        supported. The configuration file will be inserted at the position of the
        C(include=) line. Included files may contain further include lines.
        Yum will abort with an error if an inclusion loop is detected.
type: str
includepkgs:
description:
- List of packages you want to only use from a repository. This should be
a space separated list. Shell globs using wildcards (for example V(*) and V(?))
are allowed. Substitution variables (for example V($releasever)) are honored
here.
- The list can also be a regular YAML array.
type: list
elements: str
ip_resolve:
description:
- Determines how yum resolves host names.
- V(4) or V(IPv4) - resolve to IPv4 addresses only.
- V(6) or V(IPv6) - resolve to IPv6 addresses only.
choices: ['4', '6', IPv4, IPv6, whatever]
type: str
keepalive:
description:
- This tells yum whether or not HTTP/1.1 keepalive should be used with
this repository. This can improve transfer speeds by using one
connection when downloading multiple files from a repository.
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: bool
keepcache:
description:
- Either V(1) or V(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
- This parameter is deprecated as it is only valid in the main configuration
and will be removed in ansible-core 2.20.
choices: ['0', '1']
type: str
metadata_expire:
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
type: str
metadata_expire_filter:
description:
- Filter the O(metadata_expire) time, allowing a trade of speed for
accuracy if a command doesn't require it. Each yum command can specify
that it requires a certain level of timeliness quality from the remote
        repos, ranging from "I'm about to install/upgrade, so this better be current"
        to "Anything that's available is good enough".
- V(never) - Nothing is filtered, always obey O(metadata_expire).
- V(read-only:past) - Commands that only care about past information are
filtered from metadata expiring. Eg. C(yum history) info (if history
needs to lookup anything about a previous transaction, then by
definition the remote package was available in the past).
- V(read-only:present) - Commands that are balanced between past and
future. Eg. C(yum list yum).
- V(read-only:future) - Commands that are likely to result in running
other commands which will require the latest metadata. Eg.
C(yum check-update).
- Note that this option does not override C(yum clean expire-cache).
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
type: str
metalink:
description:
- Specifies a URL to a metalink file for the repomd.xml, a list of
mirrors for the entire repository are generated by converting the
mirrors for the repomd.xml file to a O(baseurl).
      - Either this, the O(baseurl), or the O(mirrorlist) parameter is required if O(state) is set to
        V(present).
type: str
mirrorlist:
description:
- Specifies a URL to a file containing a list of baseurls.
      - Either this, the O(baseurl), or the O(metalink) parameter is required if O(state) is set to
        V(present).
type: str
mirrorlist_expire:
description:
- Time (in seconds) after which the mirrorlist locally cached will
expire.
- Default value is 6 hours.
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: str
name:
description:
- Unique repository ID. This option builds the section name of the repository in the repo file.
- This parameter is only required if O(state) is set to V(present) or
V(absent).
type: str
required: true
password:
description:
- Password to use with the username for basic authentication.
type: str
priority:
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
type: str
protect:
description:
- Protect packages from updates from other repositories.
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: bool
proxy:
description:
- URL to the proxy server that yum should use. Set to V(_none_) to
disable the global proxy setting.
type: str
proxy_password:
description:
- Password for this proxy.
type: str
proxy_username:
description:
- Username to use for proxy.
type: str
repo_gpgcheck:
description:
- This tells yum whether or not it should perform a GPG signature check
on the repodata from this repository.
type: bool
reposdir:
description:
- Directory where the C(.repo) files will be stored.
type: path
default: /etc/yum.repos.d
retries:
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to V(0) makes yum try forever.
type: str
s3_enabled:
description:
- Enables support for S3 repositories.
- This option only works if the YUM S3 plugin is installed.
type: bool
skip_if_unavailable:
description:
- If set to V(true) yum will continue running if this repository cannot be
contacted for any reason. This should be set carefully as all repos are
consulted for any given command.
type: bool
ssl_check_cert_permissions:
description:
- Whether yum should check the permissions on the paths for the
certificates on the repository (both remote and local).
- If we can't read any of the files then yum will force
O(skip_if_unavailable) to be V(true). This is most useful for non-root
processes which use yum on repos that have client cert files which are
readable only by root.
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: bool
sslcacert:
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
type: str
aliases: [ ca_cert ]
sslclientcert:
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
type: str
aliases: [ client_cert ]
sslclientkey:
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
type: str
aliases: [ client_key ]
sslverify:
description:
- Defines whether yum should verify SSL certificates/hosts at all.
type: bool
aliases: [ validate_certs ]
state:
description:
- State of the repo file.
choices: [absent, present]
type: str
default: present
throttle:
description:
- Enable bandwidth throttling for downloads.
      - This option can be expressed as an absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
type: str
timeout:
description:
- Number of seconds to wait for a connection before timing out.
type: str
ui_repoid_vars:
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the O(baseurl)/etc. Variables are appended
in the order listed (and found).
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: str
username:
description:
- Username to use for basic authentication to a repo or really any url.
type: str
extends_documentation_fragment:
- action_common_attributes
- files
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: rhel
notes:
- All comments will be removed if modifying an existing repo file.
- Section order is preserved in an existing repo file.
- Parameters in a section are ordered alphabetically in an existing repo
file.
- The repo file will be automatically deleted if it contains no repository.
- When removing a repository, beware that the metadata cache may still remain
on disk until you run C(yum clean all). Use a notification handler for this.
- "The O(ignore:params) parameter was removed in Ansible 2.5 due to circumventing Ansible's parameter
handling"
"""
EXAMPLES = """
- name: Add repository
ansible.builtin.yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add multiple repositories into the same file (1/2)
ansible.builtin.yum_repository:
name: epel
description: EPEL YUM repo
file: external_repos
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
- name: Add multiple repositories into the same file (2/2)
ansible.builtin.yum_repository:
name: rpmforge
description: RPMforge YUM repo
file: external_repos
baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
enabled: no
# Handler showing how to clean yum metadata cache
- name: yum-clean-metadata
ansible.builtin.command: yum clean metadata
# Example removing a repository and cleaning up metadata cache
- name: Remove repository (and clean up left-over metadata)
ansible.builtin.yum_repository:
name: epel
state: absent
notify: yum-clean-metadata
- name: Remove repository from a specific repo file
ansible.builtin.yum_repository:
name: epel
file: external_repos
state: absent
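# A hedged sketch: a repository behind HTTP basic authentication with SSL
# verification disabled (name, URL and credentials are illustrative).
- name: Add an authenticated internal repository
  ansible.builtin.yum_repository:
    name: internal
    description: Internal YUM repo
    baseurl: https://repo.example.com/el$releasever/$basearch/
    username: deploy
    password: "{{ repo_password }}"
    sslverify: no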
"""
RETURN = """
repo:
description: repository name
returned: success
type: str
sample: "epel"
state:
description: state of the target, after execution
returned: success
type: str
sample: "present"
"""
import configparser
import os
from ansible.module_utils.basic import AnsibleModule, FILE_COMMON_ARGUMENTS
from ansible.module_utils.common.text.converters import to_native
class YumRepo:
def __init__(self, module, params, repoid, dest):
self.module = module
self.params = params
self.section = repoid
self.repofile = configparser.RawConfigParser()
self.dest = dest
if os.path.isfile(dest):
self.repofile.read(dest)
def add(self):
self.remove()
self.repofile.add_section(self.section)
for key, value in sorted(self.params.items()):
if value is None:
continue
if key == 'keepcache':
self.module.deprecate(
"'keepcache' parameter is deprecated as it is only valid in "
"the main configuration.",
version='2.20'
)
elif key == 'async':
self.module.deprecate(
"'async' parameter is deprecated as it has been removed on systems supported by ansible-core",
version='2.22',
)
elif key in {
"deltarpm_metadata_percentage",
"gpgcakey",
"http_caching",
"keepalive",
"metadata_expire_filter",
"mirrorlist_expire",
"protect",
"ssl_check_cert_permissions",
"ui_repoid_vars",
}:
self.module.deprecate(
f"'{key}' parameter is deprecated as it has no effect with dnf "
"as an underlying package manager.",
version='2.22'
)
if isinstance(value, bool):
value = str(int(value))
self.repofile.set(self.section, key, value)
def save(self):
if self.repofile.sections():
try:
with open(self.dest, 'w') as fd:
self.repofile.write(fd)
except IOError as e:
self.module.fail_json(
msg=f"Problems handling file {self.dest}.",
details=to_native(e),
)
else:
try:
os.remove(self.dest)
except OSError as e:
self.module.fail_json(
msg=f"Cannot remove empty repo file {self.dest}.",
details=to_native(e),
)
def remove(self):
self.repofile.remove_section(self.section)
def dump(self):
repo_string = ""
for section in sorted(self.repofile.sections()):
repo_string += "[%s]\n" % section
for key, value in sorted(self.repofile.items(section)):
repo_string += "%s = %s\n" % (key, value)
repo_string += "\n"
return repo_string
def main():
argument_spec = dict(
bandwidth=dict(),
baseurl=dict(type='list', elements='str'),
cost=dict(),
countme=dict(type='bool'),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(type='list', elements='str', aliases=['excludepkgs']),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(no_log=False),
gpgcheck=dict(type='bool'),
gpgkey=dict(type='list', elements='str', no_log=False),
module_hotfixes=dict(type='bool'),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(type='list', elements='str'),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(required=True),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d', type='path'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(aliases=['ca_cert']),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(aliases=['client_cert']),
sslclientkey=dict(aliases=['client_key'], no_log=False),
sslverify=dict(type='bool', aliases=['validate_certs']),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
)
# async is a Python keyword
argument_spec['async'] = dict(type='bool')
module = AnsibleModule(
required_if=[
["state", "present", ["baseurl", "mirrorlist", "metalink"], True],
["state", "present", ["description"]],
],
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# make copy of params as we need to split them into yum repo only and file params
yum_repo_params = module.params.copy()
for alias in module.aliases:
yum_repo_params.pop(alias, None)
file_common_params = {}
for param in FILE_COMMON_ARGUMENTS:
file_common_params[param] = yum_repo_params.pop(param)
state = yum_repo_params.pop("state")
name = yum_repo_params['name']
yum_repo_params['name'] = yum_repo_params.pop('description')
for list_param in ('baseurl', 'gpgkey'):
v = yum_repo_params[list_param]
if v is not None:
yum_repo_params[list_param] = '\n'.join(v)
for list_param in ('exclude', 'includepkgs'):
v = yum_repo_params[list_param]
if v is not None:
yum_repo_params[list_param] = ' '.join(v)
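    # Illustrative (hedged): baseurl=['u1', 'u2'] is written to the repo file as
    # the newline separated "u1\nu2", while exclude=['foo*', 'bar'] becomes the
    # space separated "foo* bar".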
repos_dir = yum_repo_params.pop("reposdir")
if not os.path.isdir(repos_dir):
module.fail_json(
msg="Repo directory '%s' does not exist." % repos_dir
)
if (file := yum_repo_params.pop("file")) is None:
file = name
file_common_params["dest"] = os.path.join(repos_dir, f"{file}.repo")
yumrepo = YumRepo(module, yum_repo_params, name, file_common_params["dest"])
diff = {
'before_header': file_common_params["dest"],
'before': yumrepo.dump(),
'after_header': file_common_params["dest"],
'after': ''
}
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
diff['after'] = yumrepo.dump()
changed = diff['before'] != diff['after']
if not module.check_mode and changed:
yumrepo.save()
if os.path.isfile(file_common_params["dest"]):
file_args = module.load_file_common_arguments(file_common_params)
changed = module.set_fs_attributes_if_different(file_args, changed)
module.exit_json(changed=changed, repo=name, state=state, diff=diff)
if __name__ == '__main__':
main()
| 24,461
|
Python
|
.py
| 623
| 32.094703
| 124
| 0.659255
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,185
|
tempfile.py
|
ansible_ansible/lib/ansible/modules/tempfile.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: tempfile
version_added: "2.3"
short_description: Creates temporary files and directories
description:
  - The M(ansible.builtin.tempfile) module creates temporary files and directories. The C(mktemp) command
    takes different parameters on various systems; this module helps to avoid trouble related to that.
    Files and directories created by the module are accessible only to the creator. In case you need to make them
    world-accessible you need to use the M(ansible.builtin.file) module.
- For Windows targets, use the M(ansible.windows.win_tempfile) module instead.
options:
state:
description:
- Whether to create file or directory.
type: str
choices: [ directory, file ]
default: file
path:
description:
- Location where temporary file or directory should be created.
- If path is not specified, the default system temporary directory will be used.
type: path
prefix:
description:
- Prefix of file/directory name created by module.
type: str
default: ansible.
suffix:
description:
- Suffix of file/directory name created by module.
type: str
default: ""
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
platform:
platforms: posix
seealso:
- module: ansible.builtin.file
- module: ansible.windows.win_tempfile
author:
- Krzysztof Magosa (@krzysztof-magosa)
"""
EXAMPLES = """
- name: Create temporary build directory
ansible.builtin.tempfile:
state: directory
suffix: build
- name: Create temporary file
ansible.builtin.tempfile:
state: file
suffix: temp
register: tempfile_1
- name: Create a temporary file with a specific prefix
ansible.builtin.tempfile:
state: file
suffix: txt
prefix: myfile_
- name: Use the registered var and the file module to remove the temporary file
ansible.builtin.file:
path: "{{ tempfile_1.path }}"
state: absent
when: tempfile_1.path is defined
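# A hedged sketch: the 'path' option places the temporary directory under a
# custom location (/var/tmp and the prefix are illustrative).
- name: Create a temporary build directory under /var/tmp
  ansible.builtin.tempfile:
    state: directory
    path: /var/tmp
    prefix: build.
  register: tempdir_1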
"""
RETURN = """
path:
description: Path to created file or directory.
returned: success
type: str
sample: "/tmp/ansible.bMlvdk"
"""
from os import close
from tempfile import mkstemp, mkdtemp
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='file', choices=['file', 'directory']),
path=dict(type='path'),
prefix=dict(type='str', default='ansible.'),
suffix=dict(type='str', default=''),
),
)
try:
if module.params['state'] == 'file':
handle, path = mkstemp(
prefix=module.params['prefix'],
suffix=module.params['suffix'],
dir=module.params['path'],
)
close(handle)
else:
path = mkdtemp(
prefix=module.params['prefix'],
suffix=module.params['suffix'],
dir=module.params['path'],
)
module.exit_json(changed=True, path=path)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| 3,627
|
Python
|
.py
| 113
| 26.778761
| 103
| 0.680869
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,186
|
validate_argument_spec.py
|
ansible_ansible/lib/ansible/modules/validate_argument_spec.py
|
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: validate_argument_spec
short_description: Validate role argument specs.
description:
- This module validates role arguments with a defined argument specification.
version_added: "2.11"
options:
argument_spec:
description:
- A dictionary like AnsibleModule argument_spec. See R(argument spec definition,argument_spec).
required: true
provided_arguments:
description:
- A dictionary of the arguments that will be validated according to argument_spec.
author:
- Ansible Core Team
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
become:
support: none
bypass_host_loop:
support: none
connection:
support: none
check_mode:
support: full
delegation:
support: none
diff_mode:
support: none
platform:
platforms: all
"""
EXAMPLES = r"""
- name: verify vars needed for this task file are present when included
ansible.builtin.validate_argument_spec:
argument_spec: '{{ required_data }}'
vars:
required_data:
# unlike spec file, just put the options in directly
stuff:
description: stuff
type: str
choices: ['who', 'knows', 'what']
default: what
but:
description: i guess we need one
type: str
required: true
- name: verify vars needed for this task file are present when included, with spec from a spec file
ansible.builtin.validate_argument_spec:
argument_spec: "{{ (lookup('ansible.builtin.file', 'myargspec.yml') | from_yaml )['specname']['options'] }}"
- name: verify vars needed for the next include and not from inside it, also with params I will only define there
block:
- ansible.builtin.validate_argument_spec:
argument_spec: "{{ lookup('ansible.builtin.file', 'nakedoptions.yml') }}"
provided_arguments:
        but: "that I can define on the include itself, like in its `vars:` keyword"
- name: the include itself
vars:
stuff: knows
but: nobuts!
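# A hedged sketch: validating a single integer option with an inline spec
# (option name and value are illustrative).
- name: verify an integer option inline
  ansible.builtin.validate_argument_spec:
    argument_spec:
      retries:
        type: int
        required: true
  vars:
    retries: 3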
"""
RETURN = r"""
argument_errors:
description: A list of arg validation errors.
returned: failure
type: list
elements: str
sample:
- "error message 1"
- "error message 2"
argument_spec_data:
description: A dict of the data from the 'argument_spec' arg.
returned: failure
type: dict
sample:
some_arg:
type: "str"
some_other_arg:
type: "int"
required: true
validate_args_context:
description: A dict of info about where validate_args_spec was used
type: dict
returned: always
sample:
name: my_role
type: role
path: /home/user/roles/my_role/
argument_spec_name: main
"""
| 3,042
|
Python
|
.py
| 104
| 24.403846
| 112
| 0.684786
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,187
|
stat.py
|
ansible_ansible/lib/ansible/modules/stat.py
|
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: stat
version_added: "1.3"
short_description: Retrieve file or file system status
description:
- Retrieves facts for a file similar to the Linux/Unix C(stat) command.
- For Windows targets, use the M(ansible.windows.win_stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of.
type: path
required: true
aliases: [ dest, name ]
follow:
description:
- Whether to follow symlinks.
type: bool
default: no
get_mime:
description:
- Use file magic and return data about the nature of the file. This uses
the C(file) utility found on most Linux/Unix systems.
- This will add both RV(stat.mimetype) and RV(stat.charset) fields to the return, if possible.
- In Ansible 2.3 this option changed from O(mime) to O(get_mime) and the default changed to V(true).
type: bool
default: yes
aliases: [ mime, mime_type, mime-type ]
version_added: "2.1"
get_attributes:
description:
- Get file attributes using lsattr tool if present.
type: bool
default: yes
aliases: [ attr, attributes ]
version_added: "2.3"
get_checksum:
version_added: "1.8"
extends_documentation_fragment:
- action_common_attributes
- checksum_common
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
seealso:
- module: ansible.builtin.file
- module: ansible.windows.win_stat
author: Bruce Pennypacker (@bpennypacker)
"""
EXAMPLES = r"""
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
- name: Get stats of a file
ansible.builtin.stat:
path: /etc/foo.conf
register: st
- name: Fail if the file does not belong to 'root'
ansible.builtin.fail:
msg: "Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
# not exist and we test sym.stat.islnk, it will fail with an error.
# Therefore, we must test whether it is defined.
# Run this to understand the structure, the skipped ones do not pass the
# check performed by 'when'
- name: Get stats of the FS object
ansible.builtin.stat:
path: /path/to/something
register: sym
- name: Print a debug message
ansible.builtin.debug:
msg: "islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
- name: Print a debug message
ansible.builtin.debug:
msg: "islnk is defined (path must exist)"
when: sym.stat.islnk is defined
- name: Print a debug message
ansible.builtin.debug:
msg: "Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
- name: Print a debug message
ansible.builtin.debug:
msg: "Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- name: Get stats of the FS object
ansible.builtin.stat:
path: /path/to/something
register: p
- name: Print a debug message
ansible.builtin.debug:
msg: "Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
- name: Do not calculate the checksum
ansible.builtin.stat:
path: /path/to/myhugefile
get_checksum: no
- name: Use sha256 to calculate the checksum
ansible.builtin.stat:
path: /path/to/something
checksum_algorithm: sha256
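# A hedged sketch: follow the symlink and report on its target instead of the
# link itself (the path is illustrative).
- name: Get stats of a symlink's target
  ansible.builtin.stat:
    path: /path/to/symlink
    follow: yes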
"""
RETURN = r"""
stat:
  description: Dictionary containing all the stat data; some platforms might add additional fields.
returned: success
type: dict
contains:
exists:
description: If the destination path actually exists or not
returned: success
type: bool
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: str
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal representation as a string
returned: success, path exists and user can read stats
type: str
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: bool
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: bool
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: bool
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: bool
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: bool
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: bool
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: bool
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: bool
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: bool
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: bool
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: bool
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: bool
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: bool
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: bool
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: bool
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: bool
sample: True
isuid:
description: Tells you if the set-user-id (setuid) bit is set on the path
returned: success, path exists and user can read stats
type: bool
sample: False
isgid:
description: Tells you if the set-group-id (setgid) bit is set on the path
returned: success, path exists and user can read stats
type: bool
sample: False
lnk_source:
description: Target of the symlink normalized for the remote filesystem
returned: success, path exists and user can read stats and the path is a symbolic link
type: str
sample: /home/foobar/21102015-1445431274-908472971
lnk_target:
description: Target of the symlink. Note that relative paths remain relative
returned: success, path exists and user can read stats and the path is a symbolic link
type: str
sample: ../foobar/21102015-1445431274-908472971
version_added: 2.4
checksum:
description: Hash of the file
returned: success, path exists, user can read stats, path supports
hashing and the supplied checksum algorithm is available
type: str
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
pw_name:
description: User name of owner
returned: success, path exists, user can read stats, owner name can be looked up and installed python supports it
type: str
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists, user can read stats, owner group can be looked up and installed python supports it
type: str
sample: www-data
mimetype:
description: File magic data or MIME type of the file
returned: success, path exists, user can read stats, installed python
supports it and the O(get_mime) option was V(true); returns V(unknown)
on error.
type: str
sample: application/pdf; charset=binary
charset:
description: File character set or encoding
returned: success, path exists, user can read stats, installed python
supports it and the O(get_mime) option was V(true); returns V(unknown)
on error.
type: str
sample: us-ascii
readable:
description: Tells you if the invoking user has the right to read the path
returned: success, path exists and user can read the path
type: bool
sample: False
version_added: 2.2
writeable:
description: Tells you if the invoking user has the right to write the path
returned: success, path exists and user can write the path
type: bool
sample: False
version_added: 2.2
executable:
description: Tells you if the invoking user has execute permission on the path
returned: success, path exists and user can execute the path
type: bool
sample: False
version_added: 2.2
attributes:
description: list of file attributes
returned: success, path exists and user can execute the path
type: list
sample: [ immutable, extent ]
version_added: 2.3
version:
description: The version/generation attribute of a file according to the filesystem
returned: success, path exists, user can execute the path, lsattr is available and the filesystem supports it
type: str
sample: "381700746"
version_added: 2.3
"""
import errno
import grp
import os
import pwd
import stat
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes
def format_output(module, path, st):
mode = st.st_mode
# back to ansible
output = dict(
exists=True,
path=path,
mode="%04o" % stat.S_IMODE(mode),
isdir=stat.S_ISDIR(mode),
ischr=stat.S_ISCHR(mode),
isblk=stat.S_ISBLK(mode),
isreg=stat.S_ISREG(mode),
isfifo=stat.S_ISFIFO(mode),
islnk=stat.S_ISLNK(mode),
issock=stat.S_ISSOCK(mode),
uid=st.st_uid,
gid=st.st_gid,
size=st.st_size,
inode=st.st_ino,
dev=st.st_dev,
nlink=st.st_nlink,
atime=st.st_atime,
mtime=st.st_mtime,
ctime=st.st_ctime,
wusr=bool(mode & stat.S_IWUSR),
rusr=bool(mode & stat.S_IRUSR),
xusr=bool(mode & stat.S_IXUSR),
wgrp=bool(mode & stat.S_IWGRP),
rgrp=bool(mode & stat.S_IRGRP),
xgrp=bool(mode & stat.S_IXGRP),
woth=bool(mode & stat.S_IWOTH),
roth=bool(mode & stat.S_IROTH),
xoth=bool(mode & stat.S_IXOTH),
isuid=bool(mode & stat.S_ISUID),
isgid=bool(mode & stat.S_ISGID),
)
# Platform dependent flags:
for other in [
# Some Linux
('st_blocks', 'blocks'),
('st_blksize', 'block_size'),
('st_rdev', 'device_type'),
('st_flags', 'flags'),
# Some Berkley based
('st_gen', 'generation'),
('st_birthtime', 'birthtime'),
# RISCOS
('st_ftype', 'file_type'),
('st_attrs', 'attrs'),
('st_obtype', 'object_type'),
# macOS
('st_rsize', 'real_size'),
('st_creator', 'creator'),
('st_type', 'file_type'),
]:
if hasattr(st, other[0]):
output[other[1]] = getattr(st, other[0])
return output
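# Illustrative note (not part of the module): on a typical Linux filesystem,
# os.stat() results expose st_blocks and st_blksize, so the dict returned by
# format_output() also gains 'blocks' and 'block_size' keys via the loop above.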
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest', 'name']),
follow=dict(type='bool', default=False),
get_checksum=dict(type='bool', default=True),
get_mime=dict(type='bool', default=True, aliases=['mime', 'mime_type', 'mime-type']),
get_attributes=dict(type='bool', default=True, aliases=['attr', 'attributes']),
checksum_algorithm=dict(type='str', default='sha1',
choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
aliases=['checksum', 'checksum_algo']),
),
supports_check_mode=True,
)
path = module.params.get('path')
b_path = to_bytes(path, errors='surrogate_or_strict')
follow = module.params.get('follow')
get_mime = module.params.get('get_mime')
get_attr = module.params.get('get_attributes')
get_checksum = module.params.get('get_checksum')
checksum_algorithm = module.params.get('checksum_algorithm')
# main stat data
try:
if follow:
st = os.stat(b_path)
else:
st = os.lstat(b_path)
except OSError as e:
if e.errno == errno.ENOENT:
output = {'exists': False}
module.exit_json(changed=False, stat=output)
module.fail_json(msg=e.strerror)
# process base results
output = format_output(module, path, st)
# resolved permissions
for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
output[perm[0]] = os.access(b_path, perm[1])
# symlink info
if output.get('islnk'):
output['lnk_source'] = os.path.realpath(b_path)
output['lnk_target'] = os.readlink(b_path)
try: # user data
pw = pwd.getpwuid(st.st_uid)
output['pw_name'] = pw.pw_name
except (TypeError, KeyError):
pass
try: # group data
grp_info = grp.getgrgid(st.st_gid)
output['gr_name'] = grp_info.gr_name
except (KeyError, ValueError, OverflowError):
pass
# checksums
if output.get('isreg') and output.get('readable'):
if get_checksum:
output['checksum'] = module.digest_from_file(b_path, checksum_algorithm)
# try to get mime data if requested
if get_mime:
output['mimetype'] = output['charset'] = 'unknown'
mimecmd = module.get_bin_path('file')
if mimecmd:
mimecmd = [mimecmd, '--mime-type', '--mime-encoding', b_path]
try:
rc, out, err = module.run_command(mimecmd)
if rc == 0:
mimetype, charset = out.rsplit(':', 1)[1].split(';')
output['mimetype'] = mimetype.strip()
output['charset'] = charset.split('=')[1].strip()
except Exception:
pass
# try to get attr data
if get_attr:
output['version'] = None
output['attributes'] = []
output['attr_flags'] = ''
out = module.get_file_attributes(b_path)
for x in ('version', 'attributes', 'attr_flags'):
if x in out:
output[x] = out[x]
module.exit_json(changed=False, stat=output)
if __name__ == '__main__':
main()
| 18,658
|
Python
|
.py
| 486
| 29.148148
| 126
| 0.610345
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,188
|
gather_facts.py
|
ansible_ansible/lib/ansible/modules/gather_facts.py
|
# -*- coding: utf-8 -*-
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: gather_facts
version_added: 2.8
short_description: Gathers facts about remote hosts
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
- action_common_attributes.flow
description:
- This module takes care of executing the R(configured facts modules,FACTS_MODULES), the default is to use the M(ansible.builtin.setup) module.
- This module is automatically called by playbooks to gather useful variables about remote hosts that can be used in playbooks.
- It can also be executed directly by C(/usr/bin/ansible) to check what variables are available to a host.
- Ansible provides many I(facts) about the system, automatically.
options:
parallel:
description:
- A toggle that controls if the fact modules are executed in parallel or serially and in order.
This can guarantee the merge order of module facts at the expense of performance.
- By default it will be true if more than one fact module is used.
- For low-cost/low-delay fact modules, the parallelism overhead might make the whole process take longer.
Test your specific case to see if it is a speed improvement or not.
- The C(ansible_facts_parallel) variable can be used to set this option,
overriding the default, but not the direct assignment of the option in the task.
type: bool
attributes:
action:
support: full
async:
details: while this action does not support the task 'async' keyword, it can do its own parallel processing using the O(parallel) option.
support: none
bypass_host_loop:
support: none
check_mode:
details: since this action should just query the target system info it always runs in check mode
support: full
diff_mode:
support: none
facts:
support: full
platform:
details: The action plugin should be able to select the specific platform modules automatically or can be configured manually
platforms: all
notes:
- This is mostly a wrapper around other fact gathering modules.
- Options passed into this action must be supported by all the underlying fact modules configured.
- If using O(ignore:gather_timeout) and parallel execution, it will limit the total execution time of
modules that do not accept O(ignore:gather_timeout) themselves.
- Facts returned by each module will be merged; conflicts will favor the 'last merged' value.
Order is not guaranteed when doing parallel gathering on multiple modules.
author:
- "Ansible Core Team"
"""
RETURN = """
# depends on the fact module called
"""
EXAMPLES = """
# Display facts from all hosts and store them indexed by hostname at /tmp/facts.
# ansible all -m ansible.builtin.gather_facts --tree /tmp/facts
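# A minimal sketch (illustrative values): explicitly force parallel execution
# of the configured fact modules from a play task, using the documented
# 'parallel' option.
- name: Gather facts in parallel
  ansible.builtin.gather_facts:
    parallel: true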
"""
| 3,107
|
Python
|
.py
| 64
| 43.15625
| 148
| 0.728111
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,189
|
async_wrapper.py
|
ansible_ansible/lib/ansible/modules/async_wrapper.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import errno
import json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
import multiprocessing
from ansible.module_utils.common.text.converters import to_text, to_bytes
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
# pipe for communication between forked process and parent
ipc_watcher, ipc_notifier = multiprocessing.Pipe()
job_path = ''
def notice(msg):
syslog.syslog(syslog.LOG_NOTICE, msg)
def end(res=None, exit_msg=0):
if res is not None:
print(json.dumps(res))
sys.stdout.flush()
sys.exit(exit_msg)
def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
try:
pid = os.fork()
if pid > 0:
# exit first parent
end()
except OSError:
e = sys.exc_info()[1]
end({'msg': "fork #1 failed: %d (%s)\n" % (e.errno, e.strerror), 'failed': True}, 1)
# decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
os.setsid()
os.umask(int('022', 8))
# do second fork
try:
pid = os.fork()
if pid > 0:
# TODO: print 'async_wrapper_pid': pid, but careful as it will pollute expected output.
end()
except OSError:
e = sys.exc_info()[1]
end({'msg': "fork #2 failed: %d (%s)\n" % (e.errno, e.strerror), 'failed': True}, 1)
dev_null = open('/dev/null', 'w')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
"""
Used to filter unrelated output around module JSON output, like messages from
tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before the first line-starting occurrence of '{', and filters all
trailing lines after the matching close character (working from the bottom of the output).
"""
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(u'}'):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
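# Example (sketch, not executed): given noisy wrapper output, only the JSON
# payload survives and trailing junk is reported as a warning:
# _filter_non_json_lines('MOTD banner\n{"changed": false}\ntrailing junk')
# -> ('{"changed": false}', ['Module invocation had junk after the JSON data: trailing junk'])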
def _get_interpreter(module_path):
with open(module_path, 'rb') as module_fd:
head = module_fd.read(1024)
if head[0:2] != b'#!':
return None
return head[2:head.index(b'\n')].strip().split(b' ')
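# Example (sketch): a module beginning with b'#!/usr/bin/env python3\n' yields
# [b'/usr/bin/env', b'python3']; binary modules without a shebang yield None.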
def _make_temp_dir(path):
# TODO: Add checks for permissions on path.
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def jwrite(info):
jobfile = job_path + ".tmp"
tjob = open(jobfile, "w")
try:
tjob.write(json.dumps(info))
except (IOError, OSError) as e:
notice('failed to write to %s: %s' % (jobfile, str(e)))
raise e
finally:
tjob.close()
os.rename(jobfile, job_path)
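# Note: jwrite() writes to a '.tmp' sibling and then os.rename()s it into place,
# so status readers never observe a partially written job file.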
def _run_module(wrapped_cmd, jid):
jwrite({"started": 1, "finished": 0, "ansible_job_id": jid})
result = {}
# signal that the grandchild process has started and is isolated from being
# terminated when the closing connection sends a signal to the job group
ipc_notifier.send(True)
ipc_notifier.close()
outdata = ''
filtered_outdata = ''
stderr = ''
try:
cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(wrapped_cmd)]
# call the module interpreter directly (for non-binary modules)
# this permits use of a script for an interpreter on non-Linux platforms
interpreter = _get_interpreter(cmd[0])
if interpreter:
cmd = interpreter + cmd
script = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
text=True,
encoding="utf-8",
errors="surrogateescape",
)
(outdata, stderr) = script.communicate()
(filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
result = json.loads(filtered_outdata)
if json_warnings:
# merge JSON junk warnings with any existing module warnings
module_warnings = result.get('warnings', [])
if not isinstance(module_warnings, list):
module_warnings = [module_warnings]
module_warnings.extend(json_warnings)
result['warnings'] = module_warnings
if stderr:
result['stderr'] = stderr
jwrite(result)
except (OSError, IOError):
e = sys.exc_info()[1]
result = {
"failed": 1,
"cmd": wrapped_cmd,
"msg": to_text(e),
"outdata": outdata, # temporary notice only
"stderr": stderr
}
result['ansible_job_id'] = jid
jwrite(result)
except (ValueError, Exception):
result = {
"failed": 1,
"cmd": wrapped_cmd,
"data": outdata, # temporary notice only
"stderr": stderr,
"msg": traceback.format_exc()
}
result['ansible_job_id'] = jid
jwrite(result)
def main():
if len(sys.argv) < 5:
end({
"failed": True,
"msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
"Humans, do not call directly!"
}, 1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
if '-tmp-' not in os.path.dirname(wrapped_module):
preserve_tmp = True
elif len(sys.argv) > 5:
preserve_tmp = sys.argv[5] == '-preserve_tmp'
else:
preserve_tmp = False
# consider underscore as no argsfile so we can support passing of additional positional parameters
if argsfile != '_':
cmd = "%s %s" % (wrapped_module, argsfile)
else:
cmd = wrapped_module
step = 5
async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
# setup job output directory
jobdir = os.path.expanduser(async_dir)
global job_path
job_path = os.path.join(jobdir, jid)
try:
_make_temp_dir(jobdir)
except Exception as e:
end({
"failed": 1,
"msg": "could not create directory: %s - %s" % (jobdir, to_text(e)),
"exception": to_text(traceback.format_exc()),
}, 1)
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
# we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
# close off notifier handle in grandparent, probably unnecessary as
# this process doesn't hang around long enough
ipc_notifier.close()
# allow waiting up to 2.5 seconds in total, which should be long enough for the
# worst loaded environment in practice.
retries = 25
while retries > 0:
if ipc_watcher.poll(0.1):
break
else:
retries = retries - 1
continue
notice("Return async_wrapper task started.")
end({"failed": 0, "started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
"_ansible_suppress_tmpdir_delete": (not preserve_tmp)}, 0)
else:
# The actual wrapper process
# close off the receiving end of the pipe from child process
ipc_watcher.close()
# Daemonize, so we keep on running
daemonize_self()
# we are now daemonized, create a supervisory process
notice("Starting module and watcher")
sub_pid = os.fork()
if sub_pid:
# close off inherited pipe handles
ipc_watcher.close()
ipc_notifier.close()
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
notice("Start watching %s (%s)" % (sub_pid, remaining))
time.sleep(step)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
notice("%s still running (%s)" % (sub_pid, remaining))
time.sleep(step)
remaining = remaining - step
if remaining <= 0:
# ensure we leave response in poll location
res = {'msg': 'Timeout exceeded', 'failed': True, 'child_pid': sub_pid}
jwrite(res)
# actually kill it
notice("Timeout reached, now killing %s" % (sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
notice("Sent kill to group %s " % sub_pid)
time.sleep(1)
if not preserve_tmp:
shutil.rmtree(os.path.dirname(wrapped_module), True)
end(res)
notice("Done in kid B.")
if not preserve_tmp:
shutil.rmtree(os.path.dirname(wrapped_module), True)
end()
else:
# the child process runs the actual module
notice("Start module (%s)" % os.getpid())
_run_module(cmd, jid)
notice("Module complete (%s)" % os.getpid())
except Exception as e:
notice("error: %s" % e)
end({"failed": True, "msg": "FATAL ERROR: %s" % e}, "async_wrapper exited prematurely")
if __name__ == '__main__':
main()
| 11,619
|
Python
|
.py
| 282
| 31.556738
| 120
| 0.589598
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,190
|
assemble.py
|
ansible_ansible/lib/ansible/modules/assemble.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: assemble
short_description: Assemble configuration files from fragments
description:
- Assembles a configuration file from fragments.
- Often a particular program takes a single configuration file and does not support a
C(conf.d) style structure where it is easy to build up the configuration
from multiple sources. M(ansible.builtin.assemble) will take a directory of files that can be
local or have already been transferred to the system, and concatenate them
together to produce a destination file.
- Files are assembled in string sorting order.
- Puppet calls this idea I(fragments).
version_added: '0.5'
options:
src:
description:
- An already existing directory full of source files.
type: path
required: true
dest:
description:
- A file to create using the concatenation of all of the source files.
type: path
required: true
backup:
description:
- Create a backup file (if V(true)), including the timestamp information so
you can get the original file back if you somehow clobbered it
incorrectly.
type: bool
default: no
delimiter:
description:
- A delimiter to separate the file contents.
type: str
version_added: '1.4'
remote_src:
description:
- If V(false), it will search for src at originating/master machine.
- If V(true), it will go to the remote/target machine for the src.
type: bool
default: yes
version_added: '1.4'
regexp:
description:
- Assemble files only if the given regular expression matches the filename.
- If not set, all files are assembled.
- Every V(\\) (backslash) must be escaped as V(\\\\) to comply with YAML syntax.
- Uses L(Python regular expressions,https://docs.python.org/3/library/re.html).
type: str
ignore_hidden:
description:
- A boolean that controls if files that start with a C(.) will be included or not.
type: bool
default: no
version_added: '2.0'
validate:
description:
- The validation command to run before copying into place.
- The path to the file to validate is passed in by C(%s) which must be present as in the sshd example below.
- The command is passed securely so shell features like expansion and pipes won't work.
type: str
version_added: '2.0'
attributes:
action:
support: full
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: none
diff_mode:
support: full
platform:
platforms: posix
safe_file_operations:
support: full
vault:
support: full
version_added: '2.2'
seealso:
- module: ansible.builtin.copy
- module: ansible.builtin.template
- module: ansible.windows.win_copy
author:
- Stephen Fromm (@sfromm)
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
- action_common_attributes.files
- decrypt
- files
"""
EXAMPLES = r"""
- name: Assemble from fragments from a directory
ansible.builtin.assemble:
src: /etc/someapp/fragments
dest: /etc/someapp/someapp.conf
- name: Insert the provided delimiter between fragments
ansible.builtin.assemble:
src: /etc/someapp/fragments
dest: /etc/someapp/someapp.conf
delimiter: '### START FRAGMENT ###'
- name: Assemble a new "sshd_config" file into place, after passing validation with sshd
ansible.builtin.assemble:
src: /etc/ssh/conf.d/
dest: /etc/ssh/sshd_config
validate: /usr/sbin/sshd -t -f %s
"""
RETURN = r"""#"""
import codecs
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b, indexbytes
from ansible.module_utils.common.text.converters import to_native
def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None):
""" assemble a file from a directory of fragments """
tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir)
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in sorted(os.listdir(src_path)):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = os.path.join(src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
with open(fragment, 'rb') as fragment_fh:
fragment_content = fragment_fh.read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b('\n'))
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
# byte indexing differs on Python 2 and 3,
# use indexbytes for compat
# chr(10) == '\n'
if indexbytes(delimiter, -1) != 10:
tmp.write(b('\n'))
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b('\n')):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
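# Example (sketch, hypothetical paths): with fragments '01-base' and '02-extra'
# under /etc/app/fragments, assemble_from_fragments('/etc/app/fragments',
# delimiter='# ---') returns a temp file path whose content is 01-base, the
# delimiter line, then 02-extra, in string-sorted order.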
def cleanup(path, result=None):
# cleanup just in case
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError) as e:
# don't error on possible race conditions, but keep warning
if result is not None:
result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=dict(
src=dict(type='path', required=True),
delimiter=dict(type='str'),
dest=dict(type='path', required=True),
backup=dict(type='bool', default=False),
remote_src=dict(type='bool', default=True),
regexp=dict(type='str'),
ignore_hidden=dict(type='bool', default=False),
validate=dict(type='str'),
# Options that are for the action plugin, but ignored by the module itself.
# We have them here so that the tests pass without ignores, which
# reduces the likelihood of further bugs being added.
decrypt=dict(type='bool', default=True),
),
add_file_common_args=True,
)
changed = False
path_hash = None
dest_hash = None
src = module.params['src']
dest = module.params['dest']
backup = module.params['backup']
delimiter = module.params['delimiter']
regexp = module.params['regexp']
compiled_regexp = None
ignore_hidden = module.params['ignore_hidden']
validate = module.params.get('validate', None)
result = dict(src=src, dest=dest)
if not os.path.exists(src):
module.fail_json(msg="Source (%s) does not exist" % src)
if not os.path.isdir(src):
module.fail_json(msg="Source (%s) is not a directory" % src)
if regexp is not None:
try:
compiled_regexp = re.compile(regexp)
except re.error as e:
module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp))
if validate and "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % validate)
path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir)
path_hash = module.sha1(path)
result['checksum'] = path_hash
# Backwards compat. This won't return data if FIPS mode is active
try:
pathmd5 = module.md5(path)
except ValueError:
pathmd5 = None
result['md5sum'] = pathmd5
if os.path.exists(dest):
dest_hash = module.sha1(dest)
if path_hash != dest_hash:
if validate:
(rc, out, err) = module.run_command(validate % path)
result['validation'] = dict(rc=rc, stdout=out, stderr=err)
if rc != 0:
cleanup(path)
module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
if backup and dest_hash is not None:
result['backup_file'] = module.backup_local(dest)
module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
changed = True
cleanup(path, result)
# handle file permissions
file_args = module.load_file_common_arguments(module.params)
result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
# Mission complete
result['msg'] = "OK"
module.exit_json(**result)
if __name__ == '__main__':
main()
| 9,257
|
Python
|
.py
| 245
| 31.302041
| 112
| 0.660091
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,191
|
blockinfile.py
|
ansible_ansible/lib/ansible/modules/blockinfile.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: blockinfile
short_description: Insert/update/remove a text block surrounded by marker lines
version_added: '2.0'
description:
- This module will insert/update/remove a block of multi-line text surrounded by customizable marker lines.
author:
- Yaegashi Takeshi (@yaegashi)
options:
path:
description:
- The file to modify.
- Before Ansible 2.3 this option was only usable as O(dest), O(destfile) and O(name).
type: path
required: yes
aliases: [ dest, destfile, name ]
state:
description:
- Whether the block should be there or not.
type: str
choices: [ absent, present ]
default: present
marker:
description:
- The marker line template.
- C({mark}) will be replaced with the values in O(marker_begin) (default=C(BEGIN)) and O(marker_end) (default=C(END)).
- Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs.
- Multi-line markers are not supported and will result in the block being repeatedly inserted on subsequent playbook runs.
- A newline is automatically appended by the module to O(marker_begin) and O(marker_end).
type: str
default: '# {mark} ANSIBLE MANAGED BLOCK'
block:
description:
- The text to insert inside the marker lines.
- If it is missing or an empty string, the block will be removed as if O(state) were set to V(absent).
type: str
default: ''
aliases: [ content ]
insertafter:
description:
- If specified and no begin/ending O(marker) lines are found, the block will be inserted after the last match of the specified regular expression.
- A special value is available; V(EOF) for inserting the block at the end of the file.
- If the specified regular expression has no matches or no value is passed, V(EOF) will be used instead.
- The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
This behaviour was added in ansible-core 2.14.
type: str
insertbefore:
description:
- If specified and no begin/ending O(marker) lines are found, the block will be inserted before the last match of the specified regular expression.
- A special value is available; V(BOF) for inserting the block at the beginning of the file.
- If the specified regular expression has no matches, the block will be inserted at the end of the file.
- The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
This behaviour was added in ansible-core 2.14.
type: str
create:
description:
- Create a new file if it does not exist.
type: bool
default: no
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
marker_begin:
description:
- This will be inserted at C({mark}) in the opening ansible block O(marker).
type: str
default: BEGIN
version_added: '2.5'
marker_end:
required: false
description:
- This will be inserted at C({mark}) in the closing ansible block O(marker).
type: str
default: END
version_added: '2.5'
append_newline:
required: false
description:
- Append a blank line to the inserted block, if this does not appear at the end of the file.
- Note that this attribute is not considered when C(state) is set to C(absent).
type: bool
default: no
version_added: '2.16'
prepend_newline:
required: false
description:
- Prepend a blank line to the inserted block, if this does not appear at the beginning of the file.
- Note that this attribute is not considered when C(state) is set to C(absent).
type: bool
default: no
version_added: '2.16'
notes:
- When using C(with_*) loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
- As of Ansible 2.3, the O(dest) option has been changed to O(path) as default, but O(dest) still works as well.
- Option O(ignore:follow) has been removed in Ansible 2.5, because this module modifies the contents of the file
so O(ignore:follow=no) does not make sense.
- When more than one block should be handled in one file you must change the O(marker) per task.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.files
- files
- validate
attributes:
check_mode:
support: full
diff_mode:
support: full
safe_file_operations:
support: full
platform:
support: full
platforms: posix
vault:
support: none
"""
EXAMPLES = r"""
# Before Ansible 2.3, option 'dest' or 'name' was used instead of 'path'
- name: Insert/Update "Match User" configuration block in /etc/ssh/sshd_config prepending and appending a new line
ansible.builtin.blockinfile:
path: /etc/ssh/sshd_config
append_newline: true
prepend_newline: true
block: |
Match User ansible-agent
PasswordAuthentication no
- name: Insert/Update eth0 configuration stanza in /etc/network/interfaces
(it might be better to copy files into /etc/network/interfaces.d/)
ansible.builtin.blockinfile:
path: /etc/network/interfaces
block: |
iface eth0 inet static
address 192.0.2.23
netmask 255.255.255.0
- name: Insert/Update configuration using a local file and validate it
ansible.builtin.blockinfile:
block: "{{ lookup('ansible.builtin.file', './local/sshd_config') }}"
path: /etc/ssh/sshd_config
backup: yes
validate: /usr/sbin/sshd -T -f %s
- name: Insert/Update HTML surrounded by custom markers after <body> line
ansible.builtin.blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
insertafter: "<body>"
block: |
<h1>Welcome to {{ ansible_hostname }}</h1>
<p>Last updated on {{ ansible_date_time.iso8601 }}</p>
- name: Remove HTML as well as surrounding markers
ansible.builtin.blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
block: ""
- name: Add mappings to /etc/hosts
ansible.builtin.blockinfile:
path: /etc/hosts
block: |
{{ item.ip }} {{ item.name }}
marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
loop:
- { name: host1, ip: 10.10.1.10 }
- { name: host2, ip: 10.10.1.11 }
- { name: host3, ip: 10.10.1.12 }
- name: Search with a multiline search flags regex and if found insert after
blockinfile:
path: listener.ora
block: "{{ listener_line | indent(width=8, first=True) }}"
insertafter: '(?m)SID_LIST_LISTENER_DG =\n.*\(SID_LIST ='
marker: " <!-- {mark} ANSIBLE MANAGED BLOCK -->"
"""
import re
import os
import tempfile
from ansible.module_utils.six import b
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
def write_changes(module, contents, path):
tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
with os.fdopen(tmpfd, 'wb') as tf:
tf.write(contents)
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message, diff):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False, diff=diff):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
state=dict(type='str', default='present', choices=['absent', 'present']),
marker=dict(type='str', default='# {mark} ANSIBLE MANAGED BLOCK'),
block=dict(type='str', default='', aliases=['content']),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
create=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
validate=dict(type='str'),
marker_begin=dict(type='str', default='BEGIN'),
marker_end=dict(type='str', default='END'),
append_newline=dict(type='bool', default=False),
prepend_newline=dict(type='bool', default=False),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
path = params['path']
if os.path.isdir(path):
module.fail_json(rc=256,
msg='Path %s is a directory !' % path)
path_exists = os.path.exists(path)
if not path_exists:
if not module.boolean(params['create']):
module.fail_json(rc=257,
msg='Path %s does not exist !' % path)
destpath = os.path.dirname(path)
if destpath and not os.path.exists(destpath) and not module.check_mode:
try:
os.makedirs(destpath)
except OSError as e:
module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (destpath, e.errno, e.strerror))
except Exception as e:
module.fail_json(msg='Error creating %s Error: %s' % (destpath, to_native(e)))
original = None
lines = []
else:
with open(path, 'rb') as f:
original = f.read()
lines = original.splitlines(True)
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % path,
'after_header': '%s (content)' % path}
if module._diff and original:
diff['before'] = original
insertbefore = params['insertbefore']
insertafter = params['insertafter']
block = to_bytes(params['block'])
marker = to_bytes(params['marker'])
present = params['state'] == 'present'
blank_line = [b(os.linesep)]
if not present and not path_exists:
module.exit_json(changed=False, msg="File %s not present" % path)
if insertbefore is None and insertafter is None:
insertafter = 'EOF'
if insertafter not in (None, 'EOF'):
insertre = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
elif insertbefore not in (None, 'BOF'):
insertre = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
else:
insertre = None
marker0 = re.sub(b(r'{mark}'), b(params['marker_begin']), marker) + b(os.linesep)
marker1 = re.sub(b(r'{mark}'), b(params['marker_end']), marker) + b(os.linesep)
if present and block:
if not block.endswith(b(os.linesep)):
block += b(os.linesep)
blocklines = [marker0] + block.splitlines(True) + [marker1]
else:
blocklines = []
n0 = n1 = None
for i, line in enumerate(lines):
if line == marker0:
n0 = i
if line == marker1:
n1 = i
if None in (n0, n1):
n0 = None
if insertre is not None:
if insertre.flags & re.MULTILINE:
match = insertre.search(original)
if match:
if insertafter:
n0 = to_native(original).count('\n', 0, match.end())
elif insertbefore:
n0 = to_native(original).count('\n', 0, match.start())
else:
for i, line in enumerate(lines):
if insertre.search(line):
n0 = i
if n0 is None:
n0 = len(lines)
elif insertafter is not None:
n0 += 1
elif insertbefore is not None:
n0 = 0 # insertbefore=BOF
else:
n0 = len(lines) # insertafter=EOF
elif n0 < n1:
lines[n0:n1 + 1] = []
else:
lines[n1:n0 + 1] = []
n0 = n1
# Ensure there is a line separator before the block of lines to be inserted
if n0 > 0:
if not lines[n0 - 1].endswith(b(os.linesep)):
lines[n0 - 1] += b(os.linesep)
# Before the block: check if we need to prepend a blank line
# If yes, we need to add the blank line if we are not at the beginning of the file
# and the previous line is not a blank line
# In both cases, we need to shift by one on the right the inserting position of the block
if params['prepend_newline'] and present:
if n0 != 0 and lines[n0 - 1] != b(os.linesep):
lines[n0:n0] = blank_line
n0 += 1
# Insert the block
lines[n0:n0] = blocklines
# After the block: check if we need to append a blank line
# If yes, we need to add the blank line if we are not at the end of the file
# and the line right after is not a blank line
if params['append_newline'] and present:
line_after_block = n0 + len(blocklines)
if line_after_block < len(lines) and lines[line_after_block] != b(os.linesep):
lines[line_after_block:line_after_block] = blank_line
if lines:
result = b''.join(lines)
else:
result = b''
if module._diff:
diff['after'] = result
if original == result:
msg = ''
changed = False
elif original is None:
msg = 'File created'
changed = True
elif not blocklines:
msg = 'Block removed'
changed = True
else:
msg = 'Block inserted'
changed = True
backup_file = None
if changed and not module.check_mode:
if module.boolean(params['backup']) and path_exists:
backup_file = module.backup_local(path)
# We should always follow symlinks so that we change the real file
real_path = os.path.realpath(params['path'])
write_changes(module, result, real_path)
if module.check_mode and not path_exists:
module.exit_json(changed=changed, msg=msg, diff=diff)
attr_diff = {}
msg, changed = check_file_attrs(module, changed, msg, attr_diff)
attr_diff['before_header'] = '%s (file attributes)' % path
attr_diff['after_header'] = '%s (file attributes)' % path
difflist = [diff, attr_diff]
if backup_file is None:
module.exit_json(changed=changed, msg=msg, diff=difflist)
else:
module.exit_json(changed=changed, msg=msg, diff=difflist, backup_file=backup_file)
if __name__ == '__main__':
main()
| 15,450
|
Python
|
.py
| 375
| 34.261333
| 147
| 0.644792
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,192
|
deb822_repository.py
|
ansible_ansible/lib/ansible/modules/deb822_repository.py
|
# -*- coding: utf-8 -*-
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
author: 'Ansible Core Team (@ansible)'
short_description: 'Add and remove deb822 formatted repositories'
description:
- 'Add and remove deb822 formatted repositories in Debian based distributions.'
module: deb822_repository
notes:
- This module will not automatically update caches; call the M(ansible.builtin.apt) module based
on the changed state.
options:
allow_downgrade_to_insecure:
description:
- Allow downgrading a package that was previously authenticated but
is no longer authenticated.
type: bool
allow_insecure:
description:
- Allow insecure repositories.
type: bool
allow_weak:
description:
- Allow repositories signed with a key using a weak digest algorithm.
type: bool
architectures:
description:
- Architectures to search within repository.
type: list
elements: str
by_hash:
description:
- Controls if APT should try to acquire indexes via a URI constructed
from a hashsum of the expected file instead of using the well-known
stable filename of the index.
type: bool
check_date:
description:
- Controls if APT should consider the machine's time correct and hence
perform time related checks, such as verifying that a Release file
is not from the future.
type: bool
check_valid_until:
description:
- Controls if APT should try to detect replay attacks.
type: bool
components:
description:
- Components specify different sections of one distribution version
present in a C(Suite).
type: list
elements: str
date_max_future:
description:
- Controls how far from the future a repository may be.
type: int
enabled:
description:
- Tells APT whether the source is enabled or not.
type: bool
inrelease_path:
description:
- Determines the path to the C(InRelease) file, relative to the normal
position of an C(InRelease) file.
type: str
languages:
description:
- Defines which languages information such as translated
package descriptions should be downloaded.
type: list
elements: str
name:
description:
- Name of the repo. Specifically used for C(X-Repolib-Name) and in
naming the repository and signing key files.
required: true
type: str
pdiffs:
description:
- Controls if APT should try to use C(PDiffs) to update old indexes
instead of downloading the new indexes entirely.
type: bool
signed_by:
description:
- Either a URL to a GPG key, absolute path to a keyring file, one or
more fingerprints of keys either in the C(trusted.gpg) keyring or in
the keyrings in the C(trusted.gpg.d/) directory, or an ASCII armored
GPG public key block.
type: str
suites:
description:
- >-
Suite can specify an exact path in relation to the URI(s) provided,
in which case the Components: must be omitted and suite must end
with a slash (C(/)). Alternatively, it may take the form of a
distribution version (for example a version codename like C(disco) or C(artful)).
If the suite does not specify a path, at least one component must
be present.
type: list
elements: str
targets:
description:
- Defines which download targets apt will try to acquire from this source.
type: list
elements: str
trusted:
description:
- Decides if a source is considered trusted or if warnings should be
raised before, for example, packages are installed from this source.
type: bool
types:
choices:
- deb
- deb-src
default:
- deb
type: list
elements: str
description:
- Which types of packages to look for from a given source; either
binary V(deb) or source code V(deb-src).
uris:
description:
- The URIs must specify the base of the Debian distribution archive,
from which APT finds the information it needs.
type: list
elements: str
mode:
description:
- The octal mode for newly created files in C(sources.list.d).
type: raw
default: '0644'
state:
description:
- A source string state.
type: str
choices:
- absent
- present
default: present
requirements:
- python3-debian / python-debian
version_added: '2.15'
"""
EXAMPLES = """
- name: Add debian repo
deb822_repository:
name: debian
types: deb
uris: http://deb.debian.org/debian
suites: stretch
components:
- main
- contrib
- non-free
- name: Add debian repo with key
deb822_repository:
name: debian
types: deb
uris: https://deb.debian.org
suites: stable
components:
- main
- contrib
- non-free
signed_by: |-
-----BEGIN PGP PUBLIC KEY BLOCK-----
mDMEYCQjIxYJKwYBBAHaRw8BAQdAD/P5Nvvnvk66SxBBHDbhRml9ORg1WV5CvzKY
CuMfoIS0BmFiY2RlZoiQBBMWCgA4FiEErCIG1VhKWMWo2yfAREZd5NfO31cFAmAk
IyMCGyMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQREZd5NfO31fbOwD6ArzS
dM0Dkd5h2Ujy1b6KcAaVW9FOa5UNfJ9FFBtjLQEBAJ7UyWD3dZzhvlaAwunsk7DG
3bHcln8DMpIJVXht78sL
=IE0r
-----END PGP PUBLIC KEY BLOCK-----
- name: Add repo using key from URL
deb822_repository:
name: example
types: deb
uris: https://download.example.com/linux/ubuntu
suites: '{{ ansible_distribution_release }}'
components: stable
architectures: amd64
signed_by: https://download.example.com/linux/ubuntu/gpg
"""
RETURN = """
repo:
description: A source string for the repository
returned: always
type: str
sample: |
X-Repolib-Name: debian
Types: deb
URIs: https://deb.debian.org
Suites: stable
Components: main contrib non-free
Signed-By:
-----BEGIN PGP PUBLIC KEY BLOCK-----
.
mDMEYCQjIxYJKwYBBAHaRw8BAQdAD/P5Nvvnvk66SxBBHDbhRml9ORg1WV5CvzKY
CuMfoIS0BmFiY2RlZoiQBBMWCgA4FiEErCIG1VhKWMWo2yfAREZd5NfO31cFAmAk
IyMCGyMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQREZd5NfO31fbOwD6ArzS
dM0Dkd5h2Ujy1b6KcAaVW9FOa5UNfJ9FFBtjLQEBAJ7UyWD3dZzhvlaAwunsk7DG
3bHcln8DMpIJVXht78sL
=IE0r
-----END PGP PUBLIC KEY BLOCK-----
dest:
description: Path to the repository file
returned: always
type: str
sample: /etc/apt/sources.list.d/focal-archive.sources
key_filename:
description: Path to the signed_by key file
returned: always
type: str
sample: /etc/apt/keyrings/debian.gpg
"""
import os
import re
import tempfile
import textwrap
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.file import S_IRWXU_RXG_RXO, S_IRWU_RG_RO
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import raise_from # type: ignore[attr-defined]
from ansible.module_utils.urls import generic_urlparse
from ansible.module_utils.urls import open_url
from ansible.module_utils.urls import get_user_agent
from ansible.module_utils.urls import urlparse
HAS_DEBIAN = True
DEBIAN_IMP_ERR = None
try:
from debian.deb822 import Deb822 # type: ignore[import]
except ImportError:
HAS_DEBIAN = False
DEBIAN_IMP_ERR = traceback.format_exc()
KEYRINGS_DIR = '/etc/apt/keyrings'
def ensure_keyrings_dir(module):
changed = False
if not os.path.isdir(KEYRINGS_DIR):
if not module.check_mode:
os.mkdir(KEYRINGS_DIR, S_IRWXU_RXG_RXO)
changed |= True
changed |= module.set_fs_attributes_if_different(
{
'path': KEYRINGS_DIR,
'secontext': [None, None, None],
'owner': 'root',
'group': 'root',
'mode': '0755',
'attributes': None,
},
changed,
)
return changed
def make_signed_by_filename(slug, ext):
return os.path.join(KEYRINGS_DIR, '%s.%s' % (slug, ext))
def make_sources_filename(slug):
return os.path.join(
'/etc/apt/sources.list.d',
'%s.sources' % slug
)
def format_bool(v):
return 'yes' if v else 'no'
def format_list(v):
return ' '.join(v)
def format_multiline(v):
return '\n' + textwrap.indent(
'\n'.join(line.strip() or '.' for line in v.strip().splitlines()),
' '
)
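# Example (sketch): format_multiline('line1\n\nline2') returns
# '\n line1\n .\n line2' -- blank lines become the deb822 '.' continuation.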
def format_field_name(v):
if v == 'name':
return 'X-Repolib-Name'
elif v == 'uris':
return 'URIs'
return v.replace('_', '-').title()
def is_armored(b_data):
return b'-----BEGIN PGP PUBLIC KEY BLOCK-----' in b_data
def write_signed_by_key(module, v, slug):
changed = False
if os.path.isfile(v):
return changed, v, None
b_data = None
parts = generic_urlparse(urlparse(v))
if parts.scheme:
try:
r = open_url(v, http_agent=get_user_agent())
except Exception as exc:
raise_from(RuntimeError(to_native(exc)), exc)
else:
b_data = r.read()
else:
# Not a file, nor a URL, just pass it through
return changed, None, v
if not b_data:
return changed, v, None
tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
with os.fdopen(tmpfd, 'wb') as f:
f.write(b_data)
ext = 'asc' if is_armored(b_data) else 'gpg'
filename = make_signed_by_filename(slug, ext)
src_chksum = module.sha256(tmpfile)
dest_chksum = module.sha256(filename)
if src_chksum != dest_chksum:
changed |= ensure_keyrings_dir(module)
if not module.check_mode:
module.atomic_move(tmpfile, filename)
changed |= True
changed |= module.set_mode_if_different(filename, S_IRWU_RG_RO, False)
return changed, filename, None
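# Behavior sketch (hypothetical URL): signed_by='https://example.com/key.asc'
# is downloaded and written under /etc/apt/keyrings/<slug>.asc (armored) or
# .gpg (binary), and that key file path is returned; an existing local file
# path or a bare fingerprint/key block is passed through unchanged.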
def main():
module = AnsibleModule(
argument_spec={
'allow_downgrade_to_insecure': {
'type': 'bool',
},
'allow_insecure': {
'type': 'bool',
},
'allow_weak': {
'type': 'bool',
},
'architectures': {
'elements': 'str',
'type': 'list',
},
'by_hash': {
'type': 'bool',
},
'check_date': {
'type': 'bool',
},
'check_valid_until': {
'type': 'bool',
},
'components': {
'elements': 'str',
'type': 'list',
},
'date_max_future': {
'type': 'int',
},
'enabled': {
'type': 'bool',
},
'inrelease_path': {
'type': 'str',
},
'languages': {
'elements': 'str',
'type': 'list',
},
'name': {
'type': 'str',
'required': True,
},
'pdiffs': {
'type': 'bool',
},
'signed_by': {
'type': 'str',
},
'suites': {
'elements': 'str',
'type': 'list',
},
'targets': {
'elements': 'str',
'type': 'list',
},
'trusted': {
'type': 'bool',
},
'types': {
'choices': [
'deb',
'deb-src',
],
'elements': 'str',
'type': 'list',
'default': [
'deb',
]
},
'uris': {
'elements': 'str',
'type': 'list',
},
# non-deb822 args
'mode': {
'type': 'raw',
'default': '0644',
},
'state': {
'type': 'str',
'choices': [
'present',
'absent',
],
'default': 'present',
},
},
supports_check_mode=True,
)
if not HAS_DEBIAN:
module.fail_json(msg=missing_required_lib("python3-debian"),
exception=DEBIAN_IMP_ERR)
check_mode = module.check_mode
changed = False
# Make a copy, so we don't mutate module.params to avoid future issues
params = module.params.copy()
# popped non-deb822 args
mode = params.pop('mode')
state = params.pop('state')
name = params['name']
slug = re.sub(
r'[^a-z0-9-]+',
'',
re.sub(
r'[_\s]+',
'-',
name.lower(),
),
)
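    # Example (sketch): name='My Example Repo' -> slug='my-example-repo', so the
    # repo file lands at /etc/apt/sources.list.d/my-example-repo.sources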
sources_filename = make_sources_filename(slug)
if state == 'absent':
if os.path.exists(sources_filename):
if not check_mode:
os.unlink(sources_filename)
changed |= True
for ext in ('asc', 'gpg'):
signed_by_filename = make_signed_by_filename(slug, ext)
if os.path.exists(signed_by_filename):
if not check_mode:
os.unlink(signed_by_filename)
changed = True
module.exit_json(
repo=None,
changed=changed,
dest=sources_filename,
key_filename=signed_by_filename,
)
deb822 = Deb822()
signed_by_filename = None
for key, value in sorted(params.items()):
if value is None:
continue
if isinstance(value, bool):
value = format_bool(value)
elif isinstance(value, int):
value = to_native(value)
elif is_sequence(value):
value = format_list(value)
elif key == 'signed_by':
try:
key_changed, signed_by_filename, signed_by_data = write_signed_by_key(module, value, slug)
value = signed_by_filename or signed_by_data
changed |= key_changed
except RuntimeError as exc:
module.fail_json(
msg='Could not fetch signed_by key: %s' % to_native(exc)
)
if value.count('\n') > 0:
value = format_multiline(value)
deb822[format_field_name(key)] = value
repo = deb822.dump()
tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
with os.fdopen(tmpfd, 'wb') as f:
f.write(to_bytes(repo))
sources_filename = make_sources_filename(slug)
src_chksum = module.sha256(tmpfile)
dest_chksum = module.sha256(sources_filename)
if src_chksum != dest_chksum:
if not check_mode:
module.atomic_move(tmpfile, sources_filename)
changed |= True
changed |= module.set_mode_if_different(sources_filename, mode, False)
module.exit_json(
repo=repo,
changed=changed,
dest=sources_filename,
key_filename=signed_by_filename,
)
if __name__ == '__main__':
main()
| 15,791
|
Python
|
.py
| 491
| 23.478615
| 106
| 0.58463
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,193
|
async_status.py
|
ansible_ansible/lib/ansible/modules/async_status.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: async_status
short_description: Obtain status of asynchronous task
description:
- This module gets the status of an asynchronous task.
- This module is also supported for Windows targets.
version_added: "0.5"
options:
jid:
description:
- Job or task identifier
type: str
required: true
mode:
description:
- If V(status), obtain the status.
- If V(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job O(jid), without waiting for it to finish.
type: str
choices: [ cleanup, status ]
default: status
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
check_mode:
support: full
version_added: '2.17'
diff_mode:
support: none
bypass_host_loop:
support: none
platform:
support: full
platforms: posix, windows
seealso:
- ref: playbooks_async
description: Detailed information on how to use asynchronous actions and polling.
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = r"""
---
- name: Asynchronous dnf task
ansible.builtin.dnf:
name: docker-io
state: present
async: 1000
poll: 0
register: dnf_sleeper
- name: Wait for asynchronous job to end
ansible.builtin.async_status:
jid: '{{ dnf_sleeper.ansible_job_id }}'
register: job_result
until: job_result.finished
retries: 100
delay: 10
- name: Clean up async file
ansible.builtin.async_status:
jid: '{{ dnf_sleeper.ansible_job_id }}'
mode: cleanup
"""
RETURN = r"""
ansible_job_id:
description: The asynchronous job id
returned: success
type: str
sample: '360874038559.4169'
finished:
description: Whether the asynchronous job has finished (V(1)) or not (V(0))
returned: always
type: int
sample: 1
started:
description: Whether the asynchronous job has started (V(1)) or not (V(0))
returned: always
type: int
sample: 1
stdout:
description: Any output returned by async_wrapper
returned: always
type: str
stderr:
description: Any errors returned by async_wrapper
returned: always
type: str
erased:
description: Path to erased job file
returned: when file is erased
type: str
"""
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
jid=dict(type="str", required=True),
mode=dict(type="str", default="status", choices=["cleanup", "status"]),
# passed in from the async_status action plugin
_async_dir=dict(type="path", required=True),
),
supports_check_mode=True,
)
mode = module.params['mode']
jid = module.params['jid']
async_dir = module.params['_async_dir']
# setup logging directory
log_path = os.path.join(async_dir, jid)
if not os.path.exists(log_path):
module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
if mode == 'cleanup':
os.unlink(log_path)
module.exit_json(ansible_job_id=jid, erased=log_path)
# NOT in cleanup mode, assume regular status mode
# no remote kill mode currently exists, but probably should
# consider log_path + ".pid" file and also unlink that above
    data = None
    try:
        with open(log_path) as f:
            data = f.read()
        data = json.loads(data)
    except Exception:
        if not data:
            # file not written yet? That means it is running
            module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
        else:
            # the raw text was read but could not be parsed as JSON
            module.fail_json(ansible_job_id=jid, results_file=log_path,
                             msg="Could not parse job output: %s" % data, started=1, finished=1)
if 'started' not in data:
data['finished'] = 1
data['ansible_job_id'] = jid
elif 'finished' not in data:
data['finished'] = 0
    # exit_json() keyword names must be native strings
    data = {to_native(k): v for k, v in data.items()}
module.exit_json(**data)
if __name__ == '__main__':
main()
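# --- Illustrative sketch (added commentary, not part of the module above) ---
# The status logic appears to boil down to: a status file that cannot be read
# yet means the job is still running, a payload without 'started' is treated
# as a final result, and one without 'finished' as interim state. A minimal
# standalone re-implementation, assuming `path` points at a file written by
# async_wrapper (hypothetical helper, for illustration only):
def _sketch_read_async_status(path):
    import json

    try:
        with open(path) as f:
            raw = f.read()
    except OSError:
        return {'started': 1, 'finished': 0}  # not written yet: still running
    data = json.loads(raw)
    if 'started' not in data:
        data['finished'] = 1  # final result only, job must have completed
    elif 'finished' not in data:
        data['finished'] = 0  # interim state: started but not done
    return data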
| 4,574
|
Python
|
.py
| 149
| 26.040268
| 150
| 0.681436
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,194
|
wait_for.py
|
ansible_ansible/lib/ansible/modules/wait_for.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: wait_for
short_description: Waits for a condition before continuing
description:
- You can wait for a set amount of time O(timeout), this is the default if nothing is specified or just O(timeout) is specified.
This does not produce an error.
  - Waiting for a port to become available is useful when services are not immediately available after their init scripts return,
    which is true of certain Java application servers.
- It is also useful when starting guests with the M(community.libvirt.virt) module and needing to pause until they are ready.
  - This module can also be used to wait for a string matching a regular expression to be present in a file.
- In Ansible 1.6 and later, this module can also be used to wait for a file to be available or
absent on the filesystem.
- In Ansible 1.8 and later, this module can also be used to wait for active connections to be closed before continuing, useful if a node
is being rotated out of a load balancer pool.
- For Windows targets, use the M(ansible.windows.win_wait_for) module instead.
version_added: "0.7"
options:
host:
description:
- A resolvable hostname or IP address to wait for.
type: str
default: 127.0.0.1
timeout:
description:
      - Maximum number of seconds to wait for; when used with another condition it will force an error.
      - When used without other conditions it is equivalent to just sleeping.
type: int
default: 300
connect_timeout:
description:
- Maximum number of seconds to wait for a connection to happen before closing and retrying.
type: int
default: 5
delay:
description:
- Number of seconds to wait before starting to poll.
type: int
default: 0
port:
description:
- Port number to poll.
- O(path) and O(port) are mutually exclusive parameters.
type: int
active_connection_states:
description:
- The list of TCP connection states which are counted as active connections.
type: list
elements: str
default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]
version_added: "2.3"
state:
description:
      - One of V(present), V(started), V(stopped), V(absent), or V(drained).
      - When checking a port V(started) will ensure the port is open, V(stopped) will check that it is closed, V(drained) will check for active connections.
      - When checking for a file or a search string V(present) or V(started) will ensure that the file or string is present before continuing,
        V(absent) will check that the file is absent or removed.
type: str
choices: [ absent, drained, present, started, stopped ]
default: started
path:
description:
- Path to a file on the filesystem that must exist before continuing.
- O(path) and O(port) are mutually exclusive parameters.
type: path
version_added: "1.4"
search_regex:
description:
- Can be used to match a string in either a file or a socket connection.
- Defaults to a multiline regex.
type: str
version_added: "1.4"
exclude_hosts:
description:
- List of hosts or IPs to ignore when looking for active TCP connections for V(drained) state.
type: list
elements: str
version_added: "1.8"
sleep:
description:
- Number of seconds to sleep between checks.
- Before Ansible 2.3 this was hardcoded to 1 second.
type: int
default: 1
version_added: "2.3"
msg:
description:
- This overrides the normal error message from a failure to meet the required conditions.
type: str
version_added: "2.4"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
platform:
platforms: posix
notes:
- The ability to use search_regex with a port connection was added in Ansible 1.7.
- Prior to Ansible 2.4, testing for the absence of a directory or UNIX socket did not work correctly.
- Prior to Ansible 2.4, testing for the presence of a file did not work correctly if the remote user did not have read access to that file.
- Under some circumstances when using mandatory access control, a path may always be treated as being absent even if it exists, but
can't be modified or created by the remote user either.
- When waiting for a path, symbolic links will be followed. Many other modules that manipulate files do not follow symbolic links,
so operations on the path using other modules may not work exactly as expected.
seealso:
- module: ansible.builtin.wait_for_connection
- module: ansible.windows.win_wait_for
- module: community.windows.win_wait_for_process
author:
- Jeroen Hoekx (@jhoekx)
- John Jarvis (@jarv)
- Andrii Radyk (@AnderEnder)
"""
EXAMPLES = r"""
- name: Sleep for 300 seconds and continue with play
ansible.builtin.wait_for:
timeout: 300
delegate_to: localhost
- name: Wait for port 8000 to become open on the host, don't start checking for 10 seconds
ansible.builtin.wait_for:
port: 8000
delay: 10
- name: Waits for port 8000 of any IP to close active connections, don't start checking for 10 seconds
ansible.builtin.wait_for:
host: 0.0.0.0
port: 8000
delay: 10
state: drained
- name: Wait for port 8000 of any IP to close active connections, ignoring connections for specified hosts
ansible.builtin.wait_for:
host: 0.0.0.0
port: 8000
state: drained
exclude_hosts: 10.2.1.2,10.2.1.3
- name: Wait until the file /tmp/foo is present before continuing
ansible.builtin.wait_for:
path: /tmp/foo
- name: Wait until the string "completed" is in the file /tmp/foo before continuing
ansible.builtin.wait_for:
path: /tmp/foo
search_regex: completed
- name: Wait until regex pattern matches in the file /tmp/foo and print the matched group
ansible.builtin.wait_for:
path: /tmp/foo
search_regex: completed (?P<task>\w+)
register: waitfor
- ansible.builtin.debug:
msg: Completed {{ waitfor['match_groupdict']['task'] }}
- name: Wait until the lock file is removed
ansible.builtin.wait_for:
path: /var/lock/file.lock
state: absent
- name: Wait until the process is finished and pid was destroyed
ansible.builtin.wait_for:
path: /proc/3466/status
state: absent
- name: Output customized message when failed
ansible.builtin.wait_for:
path: /tmp/foo
state: present
msg: Timeout to find file /tmp/foo
# Do not assume the inventory_hostname is resolvable and delay 10 seconds at start
- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
ansible.builtin.wait_for:
port: 22
host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
search_regex: OpenSSH
delay: 10
connection: local
# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'
- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
ansible.builtin.wait_for:
port: 22
host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
search_regex: OpenSSH
delay: 10
vars:
ansible_connection: local
"""
RETURN = r"""
elapsed:
description: The number of seconds that elapsed while waiting
returned: always
type: int
sample: 23
match_groups:
description: Tuple containing all the subgroups of the match as returned by U(https://docs.python.org/3/library/re.html#re.MatchObject.groups)
returned: always
type: list
sample: ['match 1', 'match 2']
match_groupdict:
description: Dictionary containing all the named subgroups of the match, keyed by the subgroup name,
as returned by U(https://docs.python.org/3/library/re.html#re.MatchObject.groupdict)
returned: always
type: dict
sample:
{
'group': 'match'
}
"""
import binascii
import contextlib
import datetime
import errno
import math
import mmap
import os
import re
import select
import socket
import time
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.compat.datetime import utcnow
HAS_PSUTIL = False
PSUTIL_IMP_ERR = None
try:
import psutil
HAS_PSUTIL = True
# just because we can import it on Linux doesn't mean we will use it
except ImportError:
PSUTIL_IMP_ERR = traceback.format_exc()
class TCPConnectionInfo(object):
"""
This is a generic TCP Connection Info strategy class that relies
on the psutil module, which is not ideal for targets, but necessary
for cross platform support.
A subclass may wish to override some or all of these methods.
- _get_exclude_ips()
- get_active_connections()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
match_all_ips = {
socket.AF_INET: '0.0.0.0',
socket.AF_INET6: '::',
}
ipv4_mapped_ipv6_address = {
'prefix': '::ffff',
'match_all': '::ffff:0.0.0.0'
}
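    # Added note: __new__ below hands instantiation to the most specific
    # platform subclass found by get_platform_subclass(), e.g.
    # LinuxTCPConnectionInfo when the target reports platform 'Linux'.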
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(TCPConnectionInfo)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.ips = _convert_host_to_ip(module.params['host'])
self.port = int(self.module.params['port'])
self.exclude_ips = self._get_exclude_ips()
if not HAS_PSUTIL:
module.fail_json(msg=missing_required_lib('psutil'), exception=PSUTIL_IMP_ERR)
def _get_exclude_ips(self):
exclude_hosts = self.module.params['exclude_hosts']
exclude_ips = []
if exclude_hosts is not None:
for host in exclude_hosts:
exclude_ips.extend(_convert_host_to_ip(host))
return exclude_ips
def get_active_connections_count(self):
active_connections = 0
for p in psutil.process_iter():
try:
if hasattr(p, 'get_connections'):
connections = p.get_connections(kind='inet')
else:
connections = p.connections(kind='inet')
except psutil.Error:
# Process is Zombie or other error state
continue
for conn in connections:
if conn.status not in self.module.params['active_connection_states']:
continue
if hasattr(conn, 'local_address'):
(local_ip, local_port) = conn.local_address
else:
(local_ip, local_port) = conn.laddr
if self.port != local_port:
continue
if hasattr(conn, 'remote_address'):
(remote_ip, remote_port) = conn.remote_address
else:
(remote_ip, remote_port) = conn.raddr
if (conn.family, remote_ip) in self.exclude_ips:
continue
if any((
(conn.family, local_ip) in self.ips,
(conn.family, self.match_all_ips[conn.family]) in self.ips,
local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
(conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
)):
active_connections += 1
return active_connections
# ===========================================
# Subclass: Linux
class LinuxTCPConnectionInfo(TCPConnectionInfo):
"""
This is a TCP Connection Info evaluation strategy class
that utilizes information from Linux's procfs. While less universal,
does allow Linux targets to not require an additional library.
"""
platform = 'Linux'
distribution = None
source_file = {
socket.AF_INET: '/proc/net/tcp',
socket.AF_INET6: '/proc/net/tcp6'
}
match_all_ips = {
socket.AF_INET: '00000000',
socket.AF_INET6: '00000000000000000000000000000000',
}
ipv4_mapped_ipv6_address = {
'prefix': '0000000000000000FFFF0000',
'match_all': '0000000000000000FFFF000000000000'
}
local_address_field = 1
remote_address_field = 2
connection_state_field = 3
def __init__(self, module):
self.module = module
self.ips = _convert_host_to_hex(module.params['host'])
self.port = "%0.4X" % int(module.params['port'])
self.exclude_ips = self._get_exclude_ips()
def _get_exclude_ips(self):
exclude_hosts = self.module.params['exclude_hosts']
exclude_ips = []
if exclude_hosts is not None:
for host in exclude_hosts:
exclude_ips.extend(_convert_host_to_hex(host))
return exclude_ips
def get_active_connections_count(self):
active_connections = 0
for family in self.source_file.keys():
if not os.path.isfile(self.source_file[family]):
continue
            try:
                with open(self.source_file[family]) as f:
                    for tcp_connection in f.readlines():
                        tcp_connection = tcp_connection.strip().split()
                        if tcp_connection[self.local_address_field] == 'local_address':
                            continue
                        if (tcp_connection[self.connection_state_field] not in
                                [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
                            continue
                        (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
                        if self.port != local_port:
                            continue
                        (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
                        if (family, remote_ip) in self.exclude_ips:
                            continue
                        if any((
                            (family, local_ip) in self.ips,
                            (family, self.match_all_ips[family]) in self.ips,
                            local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
                            (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
                        )):
                            active_connections += 1
            except IOError:
                # unreadable proc file: count no connections from this family
                pass
return active_connections
def _convert_host_to_ip(host):
"""
    Perform forward DNS resolution on host; an IP address input resolves to itself.
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
List of tuples containing address family and IP
"""
addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
ips = []
for family, socktype, proto, canonname, sockaddr in addrinfo:
ip = sockaddr[0]
ips.append((family, ip))
if family == socket.AF_INET:
ips.append((socket.AF_INET6, "::ffff:" + ip))
return ips
def _convert_host_to_hex(host):
"""
Convert the provided host to the format in /proc/net/tcp*
    /proc/net/tcp stores IPv4 addresses as four bytes of little-endian hex;
    /proc/net/tcp6 stores IPv6 addresses little-endian within each 4-byte word.
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
List of tuples containing address family and the
little-endian converted host
"""
ips = []
if host is not None:
for family, ip in _convert_host_to_ip(host):
hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))
hexip_hf = ""
for i in range(0, len(hexip_nf), 8):
ipgroup_nf = hexip_nf[i:i + 8]
ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))
hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf)
ips.append((family, hexip_hf))
return ips
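# --- Illustrative sketch (added, not part of the original module) ---
# The /proc/net/tcp encoding that _convert_host_to_hex targets: each 32-bit
# word is written in host byte order, so on a little-endian machine 127.0.0.1
# (0x7F000001 in network order) shows up as '0100007F'.
def _sketch_proc_net_tcp_encoding(ip='127.0.0.1'):
    import binascii
    import socket

    raw = binascii.b2a_hex(socket.inet_pton(socket.AF_INET, ip))  # b'7f000001'
    # socket.ntohl() swaps bytes on little-endian hosts and is a no-op elsewhere.
    return '%08X' % socket.ntohl(int(raw, base=16))  # '0100007F' on little-endian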
def _timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
def get_connection_state_id(state):
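    # Added note: these hex codes mirror the kernel's TCP state enum as it
    # appears in the 'st' column of /proc/net/tcp (TCP_ESTABLISHED = 1,
    # TCP_SYN_SENT = 2, and so on).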
connection_state_id = {
'ESTABLISHED': '01',
'SYN_SENT': '02',
'SYN_RECV': '03',
'FIN_WAIT1': '04',
'FIN_WAIT2': '05',
'TIME_WAIT': '06',
}
return connection_state_id[state]
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', default='127.0.0.1'),
timeout=dict(type='int', default=300),
connect_timeout=dict(type='int', default=5),
delay=dict(type='int', default=0),
port=dict(type='int'),
active_connection_states=dict(type='list', elements='str', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),
path=dict(type='path'),
search_regex=dict(type='str'),
state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),
exclude_hosts=dict(type='list', elements='str'),
sleep=dict(type='int', default=1),
msg=dict(type='str'),
),
)
host = module.params['host']
timeout = module.params['timeout']
connect_timeout = module.params['connect_timeout']
delay = module.params['delay']
port = module.params['port']
state = module.params['state']
path = module.params['path']
b_path = to_bytes(path, errors='surrogate_or_strict', nonstring='passthru')
search_regex = module.params['search_regex']
b_search_regex = to_bytes(search_regex, errors='surrogate_or_strict', nonstring='passthru')
msg = module.params['msg']
if search_regex is not None:
try:
b_compiled_search_re = re.compile(b_search_regex, re.MULTILINE)
except re.error as e:
module.fail_json(msg="Invalid regular expression: %s" % e)
else:
b_compiled_search_re = None
match_groupdict = {}
match_groups = ()
if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for", elapsed=0)
if path and state == 'stopped':
module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module", elapsed=0)
if path and state == 'drained':
module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module", elapsed=0)
if module.params['exclude_hosts'] is not None and state != 'drained':
module.fail_json(msg="exclude_hosts should only be with state=drained", elapsed=0)
for _connection_state in module.params['active_connection_states']:
try:
get_connection_state_id(_connection_state)
except Exception:
module.fail_json(msg="unknown active_connection_state (%s) defined" % _connection_state, elapsed=0)
start = utcnow()
if delay:
time.sleep(delay)
if not port and not path and state != 'drained':
time.sleep(timeout)
elif state in ['absent', 'stopped']:
# first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while utcnow() < end:
if path:
try:
if not os.access(b_path, os.F_OK):
break
except IOError:
break
elif port:
try:
s = socket.create_connection((host, port), connect_timeout)
s.shutdown(socket.SHUT_RDWR)
s.close()
except Exception:
break
# Conditions not yet met, wait and try again
time.sleep(module.params['sleep'])
else:
elapsed = utcnow() - start
if port:
module.fail_json(msg=msg or "Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
elif path:
module.fail_json(msg=msg or "Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
elif state in ['started', 'present']:
# wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while utcnow() < end:
if path:
try:
os.stat(b_path)
except OSError as e:
# If anything except file not present, throw an error
                    if e.errno != errno.ENOENT:
elapsed = utcnow() - start
module.fail_json(msg=msg or "Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
# File exists. Are there additional things to check?
if not b_compiled_search_re:
# nope, succeed!
break
try:
with open(b_path, 'rb') as f:
try:
with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mm:
search = b_compiled_search_re.search(mm)
if search:
if search.groupdict():
match_groupdict = search.groupdict()
if search.groups():
match_groups = search.groups()
break
except (ValueError, OSError) as e:
module.debug('wait_for failed to use mmap on "%s": %s. Falling back to file read().' % (path, to_native(e)))
# cannot mmap this file, try normal read
search = re.search(b_compiled_search_re, f.read())
if search:
if search.groupdict():
match_groupdict = search.groupdict()
if search.groups():
match_groups = search.groups()
break
except Exception as e:
                        module.warn('wait_for failed on "%s", unexpected exception (%s): %s' % (path, to_native(e.__class__), to_native(e)))
except IOError:
pass
elif port:
alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - utcnow()))
try:
s = socket.create_connection((host, port), min(connect_timeout, alt_connect_timeout))
except Exception:
# Failed to connect by connect_timeout. wait and try again
pass
else:
# Connected -- are there additional conditions?
if b_compiled_search_re:
b_data = b''
matched = False
while utcnow() < end:
max_timeout = math.ceil(_timedelta_total_seconds(end - utcnow()))
readable = select.select([s], [], [], max_timeout)[0]
if not readable:
# No new data. Probably means our timeout
# expired
continue
response = s.recv(1024)
if not response:
# Server shutdown
break
b_data += response
if b_compiled_search_re.search(b_data):
matched = True
break
# Shutdown the client socket
try:
s.shutdown(socket.SHUT_RDWR)
except socket.error as e:
if e.errno != errno.ENOTCONN:
raise
# else, the server broke the connection on its end, assume it's not ready
else:
s.close()
if matched:
# Found our string, success!
break
else:
# Connection established, success!
try:
s.shutdown(socket.SHUT_RDWR)
except socket.error as e:
if e.errno != errno.ENOTCONN:
raise
# else, the server broke the connection on its end, assume it's not ready
else:
s.close()
break
# Conditions not yet met, wait and try again
time.sleep(module.params['sleep'])
else: # while-else
# Timeout expired
elapsed = utcnow() - start
if port:
if search_regex:
module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
else:
module.fail_json(msg=msg or "Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
elif path:
if search_regex:
module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
else:
module.fail_json(msg=msg or "Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
elif state == 'drained':
# wait until all active connections are gone
end = start + datetime.timedelta(seconds=timeout)
tcpconns = TCPConnectionInfo(module)
while utcnow() < end:
if tcpconns.get_active_connections_count() == 0:
break
# Conditions not yet met, wait and try again
time.sleep(module.params['sleep'])
else:
elapsed = utcnow() - start
module.fail_json(msg=msg or "Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
elapsed = utcnow() - start
module.exit_json(state=state, port=port, search_regex=search_regex, match_groups=match_groups, match_groupdict=match_groupdict, path=path,
elapsed=elapsed.seconds)
if __name__ == '__main__':
main()
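# --- Illustrative sketch (added, not part of the original module) ---
# The core of state=started for a port is a deadline loop around
# socket.create_connection(); a minimal standalone version, assuming a
# host/port you control (hypothetical helper, for illustration only):
def _sketch_wait_for_port(host, port, timeout=300, sleep=1, connect_timeout=5):
    import socket
    import time

    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            sock = socket.create_connection((host, port), connect_timeout)
        except OSError:
            time.sleep(sleep)  # not ready yet; retry until the deadline
        else:
            sock.close()
            return True  # connected, so the port is open
    return False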
| 27,322
|
Python
|
.py
| 627
| 32.704944
| 159
| 0.591397
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,195
|
replace.py
|
ansible_ansible/lib/ansible/modules/replace.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Evan Kaufman <evan@digitalflophouse.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: replace
author: Evan Kaufman (@EvanK)
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.files
- files
- validate
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: posix
safe_file_operations:
support: full
vault:
support: none
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
path:
description:
- The file to modify.
- Before Ansible 2.3 this option was only usable as O(dest), O(destfile) and O(name).
type: path
required: true
aliases: [ dest, destfile, name ]
regexp:
description:
- The regular expression to look for in the contents of the file.
- Uses Python regular expressions; see
U(https://docs.python.org/3/library/re.html).
- Uses MULTILINE mode, which means V(^) and V($) match the beginning
and end of the file, as well as the beginning and end respectively
of I(each line) of the file.
- Does not use DOTALL, which means the V(.) special character matches
any character I(except newlines). A common mistake is to assume that
a negated character set like V([^#]) will also not match newlines.
- In order to exclude newlines, they must be added to the set like V([^#\\n]).
- Note that, as of Ansible 2.0, short form tasks should have any escape
sequences backslash-escaped in order to prevent them being parsed
as string literal escapes. See the examples.
type: str
required: true
replace:
description:
- The string to replace regexp matches.
- May contain backreferences that will get expanded with the regexp capture groups if the regexp matches.
- If not set, matches are removed entirely.
- Backreferences can be used ambiguously like V(\\1), or explicitly like V(\\g<1>).
type: str
default: ''
after:
description:
- If specified, only content after this match will be replaced/removed.
- Can be used in combination with O(before).
- Uses Python regular expressions; see
U(https://docs.python.org/3/library/re.html).
- Uses DOTALL, which means the V(.) special character I(can match newlines).
- Does not use MULTILINE, so V(^) and V($) will only match the beginning and end of the file.
type: str
version_added: "2.4"
before:
description:
- If specified, only content before this match will be replaced/removed.
- Can be used in combination with O(after).
- Uses Python regular expressions; see
U(https://docs.python.org/3/library/re.html).
- Uses DOTALL, which means the V(.) special character I(can match newlines).
- Does not use MULTILINE, so V(^) and V($) will only match the beginning and end of the file.
type: str
version_added: "2.4"
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
encoding:
description:
- The character encoding for reading and writing the file.
type: str
default: utf-8
version_added: "2.4"
notes:
- As of Ansible 2.3, the O(dest) option has been changed to O(path) as default, but O(dest) still works as well.
- As of Ansible 2.7.10, the combined use of O(before) and O(after) works properly. If you were relying on the
    previous incorrect behavior, you may need to adjust your tasks.
See U(https://github.com/ansible/ansible/issues/31354) for details.
- Option O(ignore:follow) has been removed in Ansible 2.5, because this module modifies the contents of the file
so O(ignore:follow=no) does not make sense.
"""
EXAMPLES = r"""
- name: Replace old hostname with new hostname (requires Ansible >= 2.4)
ansible.builtin.replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
- name: Replace after the expression till the end of the file (requires Ansible >= 2.4)
ansible.builtin.replace:
path: /etc/apache2/sites-available/default.conf
after: 'NameVirtualHost [*]'
regexp: '^(.+)$'
replace: '# \1'
- name: Replace before the expression from the beginning of the file (requires Ansible >= 2.4)
ansible.builtin.replace:
path: /etc/apache2/sites-available/default.conf
before: '# live site config'
regexp: '^(.+)$'
replace: '# \1'
# Prior to Ansible 2.7.10, using before and after in combination did the opposite of what was intended.
# see https://github.com/ansible/ansible/issues/31354 for details.
# Note (?m) which turns on MULTILINE mode so ^ matches any line's beginning
- name: Replace between the expressions (requires Ansible >= 2.4)
ansible.builtin.replace:
path: /etc/hosts
after: '(?m)^<VirtualHost [*]>'
before: '</VirtualHost>'
regexp: '^(.+)$'
replace: '# \1'
- name: Supports common file attributes
ansible.builtin.replace:
path: /home/jdoe/.ssh/known_hosts
regexp: '^old\.host\.name[^\n]*\n'
owner: jdoe
group: jdoe
mode: '0644'
- name: Supports a validate command
ansible.builtin.replace:
path: /etc/apache/ports
regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
replace: '\1 127.0.0.1:8080'
validate: '/usr/sbin/apache2ctl -f %s -t'
- name: Short form task (in ansible 2+) necessitates backslash-escaped sequences
ansible.builtin.replace: path=/etc/hosts regexp='\\b(localhost)(\\d*)\\b' replace='\\1\\2.localdomain\\2 \\1\\2'
- name: Long form task does not
ansible.builtin.replace:
path: /etc/hosts
regexp: '\b(localhost)(\d*)\b'
replace: '\1\2.localdomain\2 \1\2'
- name: Explicitly specifying positional matched groups in replacement
ansible.builtin.replace:
path: /etc/ssh/sshd_config
regexp: '^(ListenAddress[ ]+)[^\n]+$'
replace: '\g<1>0.0.0.0'
- name: Explicitly specifying named matched groups
ansible.builtin.replace:
path: /etc/ssh/sshd_config
regexp: '^(?P<dctv>ListenAddress[ ]+)(?P<host>[^\n]+)$'
replace: '#\g<dctv>\g<host>\n\g<dctv>0.0.0.0'
"""
RETURN = r"""#"""
import os
import re
import tempfile
from traceback import format_exc
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule
def write_changes(module, contents, path):
tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
with os.fdopen(tmpfd, 'wb') as f:
f.write(contents)
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
regexp=dict(type='str', required=True),
replace=dict(type='str', default=''),
after=dict(type='str'),
before=dict(type='str'),
backup=dict(type='bool', default=False),
validate=dict(type='str'),
encoding=dict(type='str', default='utf-8'),
),
add_file_common_args=True,
supports_check_mode=True,
)
params = module.params
path = params['path']
encoding = params['encoding']
res_args = dict(rc=0)
contents = None
params['after'] = to_text(params['after'], errors='surrogate_or_strict', nonstring='passthru')
params['before'] = to_text(params['before'], errors='surrogate_or_strict', nonstring='passthru')
params['regexp'] = to_text(params['regexp'], errors='surrogate_or_strict', nonstring='passthru')
params['replace'] = to_text(params['replace'], errors='surrogate_or_strict', nonstring='passthru')
if os.path.isdir(path):
        module.fail_json(rc=256, msg='Path %s is a directory!' % path)
if not os.path.exists(path):
        module.fail_json(rc=257, msg='Path %s does not exist!' % path)
else:
try:
with open(path, 'rb') as f:
contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
except (OSError, IOError) as e:
module.fail_json(msg='Unable to read the contents of %s: %s' % (path, to_text(e)),
exception=format_exc())
pattern = u''
if params['after'] and params['before']:
pattern = u'%s(?P<subsection>.*?)%s' % (params['after'], params['before'])
elif params['after']:
pattern = u'%s(?P<subsection>.*)' % params['after']
elif params['before']:
pattern = u'(?P<subsection>.*)%s' % params['before']
if pattern:
section_re = re.compile(pattern, re.DOTALL)
match = re.search(section_re, contents)
if match:
section = match.group('subsection')
indices = [match.start('subsection'), match.end('subsection')]
else:
res_args['msg'] = 'Pattern for before/after params did not match the given file: %s' % pattern
res_args['changed'] = False
module.exit_json(**res_args)
else:
section = contents
mre = re.compile(params['regexp'], re.MULTILINE)
try:
result = re.subn(mre, params['replace'], section, 0)
except re.error as e:
module.fail_json(msg="Unable to process replace due to error: %s" % to_text(e),
exception=format_exc())
if result[1] > 0 and section != result[0]:
if pattern:
result = (contents[:indices[0]] + result[0] + contents[indices[1]:], result[1])
msg = '%s replacements made' % result[1]
changed = True
if module._diff:
res_args['diff'] = {
'before_header': path,
'before': contents,
'after_header': path,
'after': result[0],
}
else:
msg = ''
changed = False
if changed and not module.check_mode:
if params['backup'] and os.path.exists(path):
res_args['backup_file'] = module.backup_local(path)
# We should always follow symlinks so that we change the real file
path = os.path.realpath(path)
write_changes(module, to_bytes(result[0], encoding=encoding), path)
res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
module.exit_json(**res_args)
if __name__ == '__main__':
main()
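# --- Illustrative sketch (added, not part of the original module) ---
# The before/after handling above reduces to: cut out a DOTALL
# '(?P<subsection>...)' window, run a MULTILINE re.subn() inside it, and
# splice the result back (hypothetical helper, for illustration only):
def _sketch_replace_between(contents, after, before, regexp, replace):
    import re

    window = re.compile('%s(?P<subsection>.*?)%s' % (after, before), re.DOTALL)
    match = window.search(contents)
    if not match:
        return contents, 0  # window not found: nothing to replace
    section, count = re.subn(re.compile(regexp, re.MULTILINE), replace,
                             match.group('subsection'))
    return (contents[:match.start('subsection')] + section +
            contents[match.end('subsection'):], count)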
| 11,818
|
Python
|
.py
| 280
| 35.757143
| 114
| 0.650461
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,196
|
reboot.py
|
ansible_ansible/lib/ansible/modules/reboot.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
module: reboot
short_description: Reboot a machine
notes:
- E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths)
to specify locations to search if the default paths do not work.
description:
- Reboot a machine, wait for it to go down, come back up, and respond to commands.
- For Windows targets, use the M(ansible.windows.win_reboot) module instead.
version_added: "2.7"
options:
pre_reboot_delay:
description:
- Seconds to wait before reboot. Passed as a parameter to the reboot command.
- On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
- On Solaris and FreeBSD, this will be seconds.
type: int
default: 0
post_reboot_delay:
description:
- Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
      - This is useful if you want to wait for something to settle even though your connection already works.
type: int
default: 0
reboot_timeout:
description:
- Maximum seconds to wait for machine to reboot and respond to a test command.
- This timeout is evaluated separately for both reboot verification and test command success so the
maximum execution time for the module is twice this amount.
type: int
default: 600
connect_timeout:
description:
- Maximum seconds to wait for a successful connection to the managed hosts before trying again.
- If unspecified, the default setting for the underlying connection plugin is used.
type: int
test_command:
description:
- Command to run on the rebooted host and expect success from to determine the machine is ready for
further tasks.
type: str
default: whoami
msg:
description:
- Message to display to users before reboot.
type: str
default: Reboot initiated by Ansible
search_paths:
description:
- Paths to search on the remote machine for the C(shutdown) command.
- I(Only) these paths will be searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching for the C(shutdown) command.
type: list
elements: str
default: ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
version_added: '2.8'
boot_time_command:
description:
- Command to run that returns a unique string indicating the last time the system was booted.
- Setting this to a command that has different output each time it is run will cause the task to fail.
type: str
default: 'cat /proc/sys/kernel/random/boot_id'
version_added: '2.10'
reboot_command:
description:
- Command to run that reboots the system, including any parameters passed to the command.
- Can be an absolute path to the command or just the command name. If an absolute path to the
command is not given, O(search_paths) on the target system will be searched to find the absolute path.
- This will cause O(pre_reboot_delay), O(post_reboot_delay), and O(msg) to be ignored.
type: str
default: '[determined based on target OS]'
version_added: '2.11'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
support: full
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
seealso:
- module: ansible.windows.win_reboot
author:
- Matt Davis (@nitzmahone)
- Sam Doran (@samdoran)
"""
EXAMPLES = r"""
- name: Unconditionally reboot the machine with all defaults
ansible.builtin.reboot:
- name: Reboot a slow machine that might have lots of updates to apply
ansible.builtin.reboot:
reboot_timeout: 3600
- name: Reboot a machine with shutdown command in unusual place
ansible.builtin.reboot:
search_paths:
- '/lib/molly-guard'
- name: Reboot machine using a custom reboot command
ansible.builtin.reboot:
reboot_command: launchctl reboot userspace
boot_time_command: uptime | cut -d ' ' -f 5
- name: Reboot machine and send a message
ansible.builtin.reboot:
msg: "Rebooting machine in 5 seconds"
"""
RETURN = r"""
rebooted:
description: true if the machine was rebooted
returned: always
type: bool
sample: true
elapsed:
description: The number of seconds that elapsed waiting for the system to be rebooted.
returned: always
type: int
sample: 23
"""
| 4,808
|
Python
|
.py
| 127
| 33.456693
| 155
| 0.721937
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,197
|
pip.py
|
ansible_ansible/lib/ansible/modules/pip.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
---
module: pip
short_description: Manages Python library dependencies
description:
- "Manage Python library dependencies. To use this module, one of the following keys is required: O(name)
or O(requirements)."
version_added: "0.7"
options:
name:
description:
      - The name of a Python library to install or the URL (bzr+, hg+, git+, svn+) of the remote package.
- This can be a list (since 2.2) and contain version specifiers (since 2.7).
type: list
elements: str
version:
description:
- The version number to install of the Python library specified in the O(name) parameter.
type: str
requirements:
description:
- The path to a pip requirements file, which should be local to the remote system.
File can be specified as a relative path if using the O(chdir) option.
type: str
virtualenv:
description:
- An optional path to a I(virtualenv) directory to install into.
It cannot be specified together with the O(executable) parameter
(added in 2.1).
If the virtualenv does not exist, it will be created before installing
packages. The optional O(virtualenv_site_packages), O(virtualenv_command),
and O(virtualenv_python) options affect the creation of the virtualenv.
type: path
virtualenv_site_packages:
description:
- Whether the virtual environment will inherit packages from the
global C(site-packages) directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
type: bool
default: "no"
version_added: "1.0"
virtualenv_command:
description:
- The command or a pathname to the command to create the virtual
environment with. For example V(pyvenv), V(virtualenv),
V(virtualenv2), V(~/bin/virtualenv), V(/usr/local/bin/virtualenv).
type: path
default: virtualenv
version_added: "1.1"
virtualenv_python:
description:
- The Python executable used for creating the virtual environment.
For example V(python3.12), V(python2.7). When not specified, the
Python version used to run the ansible module is used. This parameter
should not be used when O(virtualenv_command) is using V(pyvenv) or
the C(-m venv) module.
type: str
version_added: "2.0"
state:
description:
      - The state of the module.
- The V(forcereinstall) option is only available in Ansible 2.1 and above.
type: str
choices: [ absent, forcereinstall, latest, present ]
default: present
extra_args:
description:
- Extra arguments passed to C(pip).
type: str
version_added: "1.0"
editable:
description:
- Pass the editable flag.
type: bool
default: 'no'
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command.
type: path
version_added: "1.3"
executable:
description:
- The explicit executable or pathname for the C(pip) executable,
if different from the Ansible Python interpreter. For
example V(pip3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
- Mutually exclusive with O(virtualenv) (added in 2.1).
- Does not affect the Ansible Python interpreter.
- The C(setuptools) package must be installed for both the Ansible Python interpreter
and for the version of Python specified by this option.
type: path
version_added: "1.3"
umask:
description:
- The system umask to apply before installing the pip package. This is
useful, for example, when installing on systems that have a very
restrictive umask by default (e.g., C(0077)) and you want to C(pip install)
packages which are to be used by all users. Note that this requires you
to specify desired umask mode as an octal string, (e.g., C(0022)).
type: str
version_added: "2.1"
break_system_packages:
description:
- Allow C(pip) to modify an externally-managed Python installation as defined by PEP 668.
- This is typically required when installing packages outside a virtual environment on modern systems.
type: bool
default: false
version_added: "2.17"
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
notes:
  - Python installations marked externally-managed (as defined by PEP 668) cannot be updated by pip versions >= 23.0.1 without the use of
a virtual environment or setting the O(break_system_packages) option.
- The virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
- Although it executes using the Ansible Python interpreter, the pip module shells out to
run the actual pip command, so it can use any pip version you specify with O(executable).
By default, it uses the pip version for the Ansible Python interpreter. For example, pip3 on python 3, and pip2 or pip on python 2.
- The interpreter used by Ansible
(see R(ansible_python_interpreter, ansible_python_interpreter))
requires the setuptools package, regardless of the version of pip set with
the O(executable) option.
requirements:
- pip
- virtualenv
- setuptools or packaging
author:
- Matt Wright (@mattupstate)
"""
EXAMPLES = """
- name: Install bottle python package
ansible.builtin.pip:
name: bottle
- name: Install bottle python package on version 0.11
ansible.builtin.pip:
name: bottle==0.11
- name: Install bottle python package with version specifiers
ansible.builtin.pip:
name: bottle>0.10,<0.20,!=0.11
- name: Install multi python packages with version specifiers
ansible.builtin.pip:
name:
- django>1.11.0,<1.12.0
- bottle>0.10,<0.20,!=0.11
- name: Install python package using a proxy
ansible.builtin.pip:
name: six
environment:
http_proxy: 'http://127.0.0.1:8080'
https_proxy: 'https://127.0.0.1:8080'
# You do not have to supply '-e' option in extra_args
- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+,svn+)
ansible.builtin.pip:
name: svn+http://myrepo/svn/MyApp#egg=MyApp
- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+)
ansible.builtin.pip:
name: git+http://myrepo/app/MyApp
- name: Install MyApp from local tarball
ansible.builtin.pip:
name: file:///path/to/MyApp.tar.gz
- name: Install bottle into the specified (virtualenv), inheriting none of the globally installed modules
ansible.builtin.pip:
name: bottle
virtualenv: /my_app/venv
- name: Install bottle into the specified (virtualenv), inheriting globally installed modules
ansible.builtin.pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_site_packages: yes
- name: Install bottle into the specified (virtualenv), using Python 2.7
ansible.builtin.pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_command: virtualenv-2.7
- name: Install bottle within a user home directory
ansible.builtin.pip:
name: bottle
extra_args: --user
- name: Install specified python requirements
ansible.builtin.pip:
requirements: /my_app/requirements.txt
- name: Install specified python requirements in indicated (virtualenv)
ansible.builtin.pip:
requirements: /my_app/requirements.txt
virtualenv: /my_app/venv
- name: Install specified python requirements and custom Index URL
ansible.builtin.pip:
requirements: /my_app/requirements.txt
extra_args: -i https://example.com/pypi/simple
- name: Install specified python requirements offline from a local directory with downloaded packages
ansible.builtin.pip:
requirements: /my_app/requirements.txt
extra_args: "--no-index --find-links=file:///my_downloaded_packages_dir"
- name: Install bottle for Python 3.3 specifically, using the 'pip3.3' executable
ansible.builtin.pip:
name: bottle
executable: pip3.3
- name: Install bottle, forcing reinstallation if it's already installed
ansible.builtin.pip:
name: bottle
state: forcereinstall
- name: Install bottle while ensuring the umask is 0022 (to ensure other users can use it)
ansible.builtin.pip:
name: bottle
umask: "0022"
become: True
- name: Run a module inside a virtual environment
block:
- name: Ensure the virtual environment exists
pip:
name: psutil
virtualenv: "{{ venv_dir }}"
# On Debian-based systems the correct python*-venv package must be installed to use the `venv` module.
virtualenv_command: "{{ ansible_python_interpreter }} -m venv"
- name: Run a module inside the virtual environment
wait_for:
port: 22
vars:
# Alternatively, use a block to affect multiple tasks, or use set_fact to affect the remainder of the playbook.
ansible_python_interpreter: "{{ venv_python }}"
vars:
venv_dir: /tmp/pick-a-better-venv-path
venv_python: "{{ venv_dir }}/bin/python"
"""
RETURN = """
cmd:
description: pip command used by the module
returned: success
type: str
sample: pip2 install ansible six
name:
description: list of python modules targeted by pip
returned: success
type: list
sample: ['ansible', 'six']
requirements:
description: Path to the requirements file
returned: success, if a requirements file was provided
type: str
sample: "/srv/git/project/requirements.txt"
version:
description: Version of the package specified in 'name'
returned: success, if a name and version were provided
type: str
sample: "2.5.1"
virtualenv:
description: Path to the virtualenv
returned: success, if a virtualenv path was provided
type: str
sample: "/tmp/virtualenv"
"""
import argparse
import os
import re
import sys
import tempfile
import operator
import shlex
import traceback
from ansible.module_utils.compat.version import LooseVersion
PACKAGING_IMP_ERR = None
HAS_PACKAGING = False
HAS_SETUPTOOLS = False
try:
from packaging.requirements import Requirement as parse_requirement
HAS_PACKAGING = True
except Exception:
# This is catching a generic Exception, due to packaging on EL7 raising a TypeError on import
HAS_PACKAGING = False
PACKAGING_IMP_ERR = traceback.format_exc()
try:
from pkg_resources import Requirement
parse_requirement = Requirement.parse # type: ignore[misc,assignment]
del Requirement
HAS_SETUPTOOLS = True
except ImportError:
pass
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.basic import AnsibleModule, is_executable, missing_required_lib
from ansible.module_utils.common.locale import get_best_parsable_locale
#: Python one-liners to be run at the command line that will determine the
# installed version for these special libraries. These are libraries that
# don't end up in the output of pip freeze.
_SPECIAL_PACKAGE_CHECKERS = {
'importlib': {
'setuptools': 'from importlib.metadata import version; print(version("setuptools"))',
'pip': 'from importlib.metadata import version; print(version("pip"))',
},
'pkg_resources': {
'setuptools': 'import setuptools; print(setuptools.__version__)',
'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)',
}
}
_VCS_RE = re.compile(r'(svn|git|hg|bzr)\+')
op_dict = {">=": operator.ge, "<=": operator.le, ">": operator.gt,
"<": operator.lt, "==": operator.eq, "!=": operator.ne, "~=": operator.ge}
def _is_vcs_url(name):
"""Test whether a name is a vcs url or not."""
return re.match(_VCS_RE, name)
def _is_venv_command(command):
venv_parser = argparse.ArgumentParser()
venv_parser.add_argument('-m', type=str)
argv = shlex.split(command)
if argv[0] == 'pyvenv':
return True
args, dummy = venv_parser.parse_known_args(argv[1:])
if args.m == 'venv':
return True
return False
def _is_package_name(name):
"""Test whether the name is a package name or a version specifier."""
return not name.lstrip().startswith(tuple(op_dict.keys()))
def _recover_package_name(names):
"""Recover package names as list from user's raw input.
:input: a mixed and invalid list of names or version specifiers
    :return: a list of valid package names
    e.g.
input: ['django>1.11.1', '<1.11.3', 'ipaddress', 'simpleproject>1.1.0', '<2.0.0']
return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
input: ['django>1.11.1,<1.11.3,ipaddress', 'simpleproject>1.1.0,<2.0.0']
return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
"""
# rebuild input name to a flat list so we can tolerate any combination of input
tmp = []
for one_line in names:
tmp.extend(one_line.split(","))
names = tmp
# reconstruct the names
name_parts = []
package_names = []
in_brackets = False
for name in names:
if _is_package_name(name) and not in_brackets:
if name_parts:
package_names.append(",".join(name_parts))
name_parts = []
if "[" in name:
in_brackets = True
if in_brackets and "]" in name:
in_brackets = False
name_parts.append(name)
package_names.append(",".join(name_parts))
return package_names
def _get_cmd_options(module, cmd):
thiscmd = cmd + " --help"
rc, stdout, stderr = module.run_command(thiscmd)
if rc != 0:
module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
words = stdout.strip().split()
cmd_options = [x for x in words if x.startswith('--')]
return cmd_options
def _get_packages(module, pip, chdir):
"""Return results of pip command to get packages."""
# Try 'pip list' command first.
command = pip + ['list', '--format=freeze']
locale = get_best_parsable_locale(module)
lang_env = {'LANG': locale, 'LC_ALL': locale, 'LC_MESSAGES': locale}
rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
# If there was an error (pip version too old) then use 'pip freeze'.
if rc != 0:
command = pip + ['freeze']
rc, out, err = module.run_command(command, cwd=chdir)
if rc != 0:
_fail(module, command, out, err)
return ' '.join(command), out, err
def _is_present(module, req, installed_pkgs, pkg_command):
"""Return whether or not package is installed."""
for pkg in installed_pkgs:
if '==' in pkg:
pkg_name, pkg_version = pkg.split('==')
pkg_name = Package.canonicalize_name(pkg_name)
else:
continue
if pkg_name == req.package_name and req.is_satisfied_by(pkg_version):
return True
return False
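# Added note: _is_present only considers plain 'name==version' lines from the
# freeze-style listing; editable installs and VCS entries have no '==' and are
# skipped, so they always report as not present.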
def _get_pip(module, env=None, executable=None):
candidate_pip_basenames = ('pip3',)
pip = None
if executable is not None:
if os.path.isabs(executable):
pip = executable
else:
            # If you define your own executable, that executable should be the only candidate.
# As noted in the docs, executable doesn't work with virtualenvs.
candidate_pip_basenames = (executable,)
elif executable is None and env is None and _have_pip_module():
# If no executable or virtualenv were specified, use the pip module for the current Python interpreter if available.
# Use of `__main__` is required to support Python 2.6 since support for executing packages with `runpy` was added in Python 2.7.
# Without it Python 2.6 gives the following error: pip is a package and cannot be directly executed
pip = [sys.executable, '-m', 'pip.__main__']
if pip is None:
if env is None:
opt_dirs = []
for basename in candidate_pip_basenames:
pip = module.get_bin_path(basename, False, opt_dirs)
if pip is not None:
break
else:
# For-else: Means that we did not break out of the loop
# (therefore, that pip was not found)
module.fail_json(msg='Unable to find any of %s to use. pip'
' needs to be installed.' % ', '.join(candidate_pip_basenames))
else:
# If we're using a virtualenv we must use the pip from the
# virtualenv
venv_dir = os.path.join(env, 'bin')
candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
for basename in candidate_pip_basenames:
candidate = os.path.join(venv_dir, basename)
if os.path.exists(candidate) and is_executable(candidate):
pip = candidate
break
else:
# For-else: Means that we did not break out of the loop
# (therefore, that pip was not found)
module.fail_json(msg='Unable to find pip in the virtualenv, %s, ' % env +
'under any of these names: %s. ' % (', '.join(candidate_pip_basenames)) +
'Make sure pip is present in the virtualenv.')
if not isinstance(pip, list):
pip = [pip]
return pip
def _have_pip_module(): # type: () -> bool
"""Return True if the `pip` module can be found using the current Python interpreter, otherwise return False."""
try:
from importlib.util import find_spec
except ImportError:
find_spec = None # type: ignore[assignment] # type: ignore[no-redef]
if find_spec: # type: ignore[truthy-function]
# noinspection PyBroadException
try:
# noinspection PyUnresolvedReferences
found = bool(find_spec('pip'))
except Exception:
found = False
else:
# noinspection PyDeprecation
import imp
# noinspection PyBroadException
try:
# noinspection PyDeprecation
imp.find_module('pip')
except Exception:
found = False
else:
found = True
return found
def _fail(module, cmd, out, err):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg)
def _get_package_info(module, package, python_bin=None):
"""This is only needed for special packages which do not show up in pip freeze
pip and setuptools fall into this category.
:returns: a string containing the version number if the package is
installed. None if the package is not installed.
"""
if python_bin is None:
return
discovery_mechanism = 'pkg_resources'
importlib_rc = module.run_command([python_bin, '-c', 'import importlib.metadata'])[0]
if importlib_rc == 0:
discovery_mechanism = 'importlib'
rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[discovery_mechanism][package]])
if rc:
formatted_dep = None
else:
formatted_dep = '%s==%s' % (package, out.strip())
return formatted_dep
def setup_virtualenv(module, env, chdir, out, err):
if module.check_mode:
module.exit_json(changed=True)
cmd = shlex.split(module.params['virtualenv_command'])
# Find the binary for the command in the PATH
# and switch the command for the explicit path.
if os.path.basename(cmd[0]) == cmd[0]:
cmd[0] = module.get_bin_path(cmd[0], True)
# Add the system-site-packages option if that
# is enabled, otherwise explicitly set the option
# to not use system-site-packages if that is an
# option provided by the command's help function.
if module.params['virtualenv_site_packages']:
cmd.append('--system-site-packages')
else:
cmd_opts = _get_cmd_options(module, cmd[0])
if '--no-site-packages' in cmd_opts:
cmd.append('--no-site-packages')
virtualenv_python = module.params['virtualenv_python']
# -p is a virtualenv option, not compatible with pyenv or venv
# this conditional validates if the command being used is not any of them
if not _is_venv_command(module.params['virtualenv_command']):
if virtualenv_python:
cmd.append('-p%s' % virtualenv_python)
else:
# This code mimics the upstream behaviour of using the python
# which invoked virtualenv to determine which python is used
# inside of the virtualenv (when none are specified).
cmd.append('-p%s' % sys.executable)
# if venv or pyvenv are used and virtualenv_python is defined, then
# virtualenv_python is ignored, this has to be acknowledged
elif module.params['virtualenv_python']:
module.fail_json(
msg='virtualenv_python should not be used when'
' using the venv module or pyvenv as virtualenv_command'
)
cmd.append(env)
rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
out += out_venv
err += err_venv
if rc != 0:
_fail(module, cmd, out, err)
return out, err
class Package:
"""Python distribution package metadata wrapper.
A wrapper class for Requirement, which provides
API to parse package name, version specifier,
test whether a package is already satisfied.
"""
_CANONICALIZE_RE = re.compile(r'[-_.]+')
def __init__(self, name_string, version_string=None):
self._plain_package = False
self.package_name = name_string
self._requirement = None
if version_string:
version_string = version_string.lstrip()
separator = '==' if version_string[0].isdigit() else ' '
name_string = separator.join((name_string, version_string))
try:
self._requirement = parse_requirement(name_string)
# old pkg_resource will replace 'setuptools' with 'distribute' when it's already installed
project_name = Package.canonicalize_name(
getattr(self._requirement, 'name', None) or getattr(self._requirement, 'project_name', None)
)
if project_name == "distribute" and "setuptools" in name_string:
self.package_name = "setuptools"
else:
self.package_name = project_name
self._plain_package = True
        except ValueError:
            pass
@property
def has_version_specifier(self):
if self._plain_package:
return bool(getattr(self._requirement, 'specifier', None) or getattr(self._requirement, 'specs', None))
return False
def is_satisfied_by(self, version_to_test):
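        # e.g. (illustrative) Package('django', '>=1.0').is_satisfied_by('1.11.1') -> True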
if not self._plain_package:
return False
try:
return self._requirement.specifier.contains(version_to_test, prereleases=True)
except AttributeError:
# old setuptools has no specifier, do fallback
version_to_test = LooseVersion(version_to_test)
return all(
op_dict[op](version_to_test, LooseVersion(ver))
for op, ver in self._requirement.specs
)
@staticmethod
def canonicalize_name(name):
# This is taken from PEP 503.
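        # e.g. "Foo_Bar.baz" -> "foo-bar-baz"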
return Package._CANONICALIZE_RE.sub("-", name).lower()
def __str__(self):
if self._plain_package:
return to_native(self._requirement)
return self.package_name
def main():
state_map = dict(
present=['install'],
absent=['uninstall', '-y'],
latest=['install', '-U'],
forcereinstall=['install', '-U', '--force-reinstall'],
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=list(state_map.keys())),
name=dict(type='list', elements='str'),
version=dict(type='str'),
requirements=dict(type='str'),
virtualenv=dict(type='path'),
virtualenv_site_packages=dict(type='bool', default=False),
virtualenv_command=dict(type='path', default='virtualenv'),
virtualenv_python=dict(type='str'),
extra_args=dict(type='str'),
editable=dict(type='bool', default=False),
chdir=dict(type='path'),
executable=dict(type='path'),
umask=dict(type='str'),
break_system_packages=dict(type='bool', default=False),
),
required_one_of=[['name', 'requirements']],
mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
supports_check_mode=True,
)
if not HAS_SETUPTOOLS and not HAS_PACKAGING:
module.fail_json(msg=missing_required_lib("packaging"),
exception=PACKAGING_IMP_ERR)
state = module.params['state']
name = module.params['name']
version = module.params['version']
requirements = module.params['requirements']
extra_args = module.params['extra_args']
chdir = module.params['chdir']
umask = module.params['umask']
env = module.params['virtualenv']
venv_created = False
if env and chdir:
env = os.path.join(chdir, env)
if umask and not isinstance(umask, int):
try:
umask = int(umask, 8)
except Exception:
module.fail_json(msg="umask must be an octal integer",
details=to_native(sys.exc_info()[1]))
old_umask = None
if umask is not None:
old_umask = os.umask(umask)
try:
if state == 'latest' and version is not None:
module.fail_json(msg='version is incompatible with state=latest')
if chdir is None:
# this is done to avoid permissions issues with privilege escalation and virtualenvs
chdir = tempfile.gettempdir()
err = ''
out = ''
if env:
if not os.path.exists(os.path.join(env, 'bin', 'activate')):
venv_created = True
out, err = setup_virtualenv(module, env, chdir, out, err)
py_bin = os.path.join(env, 'bin', 'python')
else:
py_bin = module.params['executable'] or sys.executable
pip = _get_pip(module, env, module.params['executable'])
cmd = pip + state_map[state]
# If there's a virtualenv we want things we install to be able to use other
# installations that exist as binaries within this virtualenv. Example: we
# install cython and then gevent -- gevent needs to use the cython binary,
# not just a python package that will be found by calling the right python.
# So if there's a virtualenv, we add that bin/ to the beginning of the PATH
# in run_command by setting path_prefix here.
path_prefix = None
if env:
path_prefix = os.path.join(env, 'bin')
# Automatically apply -e option to extra_args when source is a VCS url. VCS
# includes those beginning with svn+, git+, hg+ or bzr+
has_vcs = False
if name:
for pkg in name:
if pkg and _is_vcs_url(pkg):
has_vcs = True
break
# convert raw input package names to Package instances
packages = [Package(pkg) for pkg in _recover_package_name(name)]
# check invalid combination of arguments
if version is not None:
if len(packages) > 1:
module.fail_json(
msg="'version' argument is ambiguous when installing multiple package distributions. "
"Please specify version restrictions next to each package in 'name' argument."
)
if packages[0].has_version_specifier:
module.fail_json(
msg="The 'version' argument conflicts with any version specifier provided along with a package name. "
"Please keep the version specifier, but remove the 'version' argument."
)
# if the version specifier is provided by version, append that into the package
packages[0] = Package(to_native(packages[0]), version)
if module.params['editable']:
args_list = [] # used if extra_args is not used at all
if extra_args:
args_list = extra_args.split(' ')
if '-e' not in args_list:
args_list.append('-e')
# Ok, we will reconstruct the option string
extra_args = ' '.join(args_list)
if extra_args:
cmd.extend(shlex.split(extra_args))
if module.params['break_system_packages']:
# Using an env var instead of the `--break-system-packages` option, to avoid failing under pip 23.0.0 and earlier.
# See: https://github.com/pypa/pip/pull/11780
os.environ['PIP_BREAK_SYSTEM_PACKAGES'] = '1'
if name:
cmd.extend(to_native(p) for p in packages)
elif requirements:
cmd.extend(['-r', requirements])
else:
module.exit_json(
changed=False,
warnings=["No valid name or requirements file found."],
)
if module.check_mode:
if extra_args or requirements or state == 'latest' or not name:
module.exit_json(changed=True)
pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
out += out_pip
err += err_pip
changed = False
if name:
pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
# Older versions of pip (pre-1.3) do not have pip list.
# pip freeze does not list setuptools or pip in its output
                # So we need to get those via a special case
for pkg in ('setuptools', 'pip'):
if pkg in name:
formatted_dep = _get_package_info(module, pkg, py_bin)
if formatted_dep is not None:
pkg_list.append(formatted_dep)
out += '%s\n' % formatted_dep
for package in packages:
is_present = _is_present(module, package, pkg_list, pkg_cmd)
if (state == 'present' and not is_present) or (state == 'absent' and is_present):
changed = True
break
module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
out_freeze_before = None
if requirements or has_vcs:
dummy, out_freeze_before, dummy = _get_packages(module, pip, chdir)
rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
out += out_pip
err += err_pip
if rc == 1 and state == 'absent' and \
('not installed' in out_pip or 'not installed' in err_pip):
pass # rc is 1 when attempting to uninstall non-installed package
elif rc != 0:
_fail(module, cmd, out, err)
if state == 'absent':
changed = 'Successfully uninstalled' in out_pip
else:
if out_freeze_before is None:
changed = 'Successfully installed' in out_pip
else:
dummy, out_freeze_after, dummy = _get_packages(module, pip, chdir)
changed = out_freeze_before != out_freeze_after
changed = changed or venv_created
module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
state=state, requirements=requirements, virtualenv=env,
stdout=out, stderr=err)
finally:
if old_umask is not None:
os.umask(old_umask)
if __name__ == '__main__':
main()
| 32,791 | Python | .py | 760 | 35.043421 | 142 | 0.641111 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,198 | find.py | ansible_ansible/lib/ansible/modules/find.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: find
author: Brian Coca (@bcoca)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
- For Windows targets, use the M(ansible.windows.win_find) module instead.
- This module does not use the C(find) command; it is a much simpler and slower Python implementation
  intended for small and simple uses. Those who need the extra power or speed and have expertise
  with the UNIX command should use it directly.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
- Use a negative age to find files equal to or less than the specified time.
- You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
type: str
get_checksum:
default: false
checksum_algorithm:
version_added: "2.19"
patterns:
default: []
description:
- One or more (shell or regex) patterns, which type is controlled by O(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
- The pattern is matched against the file base name, excluding the directory.
- When using regexen, the pattern MUST match the ENTIRE file name, not just parts of it. So
if you are looking to match all files ending in .default, you'd need to use C(.*\.default)
as a regexp and not just C(\.default).
- This parameter expects a list, which can be either comma separated or YAML. If any of the
patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
in undesirable ways.
- Defaults to V(*) when O(use_regex=False), or V(.*) when O(use_regex=True).
type: list
aliases: [ pattern ]
elements: str
excludes:
description:
- One or more (shell or regex) patterns, which type is controlled by O(use_regex) option.
- Items whose basenames match an O(excludes) pattern are culled from O(patterns) matches.
Multiple patterns can be specified using a list.
type: list
aliases: [ exclude ]
version_added: "2.5"
elements: str
contains:
description:
- A regular expression or pattern which should be matched against the file content.
- If O(read_whole_file=false) it matches against the beginning of the line (uses
V(re.match(\))). If O(read_whole_file=true), it searches anywhere for that pattern
(uses V(re.search(\))).
- Works only when O(file_type) is V(file).
type: str
read_whole_file:
description:
- When doing a C(contains) search, determines whether the whole file should be read into
memory or if the regex should be applied to the file line-by-line.
- Setting this to C(true) can have performance and memory implications for large files.
- This uses V(re.search(\)) instead of V(re.match(\)).
type: bool
default: false
version_added: "2.11"
paths:
description:
- List of paths of directories to search. All paths must be fully qualified.
- From ansible-core 2.18 and onwards, the data type has changed from C(str) to C(path).
type: list
required: true
aliases: [ name, path ]
elements: path
file_type:
description:
- Type of file to select.
- The V(link) and V(any) choices were added in Ansible 2.3.
type: str
choices: [ any, directory, file, link ]
default: file
recurse:
description:
- If target is a directory, recursively descend into the directory looking for files.
type: bool
default: no
size:
description:
- Select files whose size is equal to or greater than the specified size.
- Use a negative size to find files equal to or less than the specified size.
- Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
- Size is not evaluated for directories.
type: str
age_stamp:
description:
- Choose the file property against which we compare age.
type: str
choices: [ atime, ctime, mtime ]
default: mtime
hidden:
description:
- Set this to V(true) to include hidden files, otherwise they will be ignored.
type: bool
default: no
mode:
description:
- Choose objects matching a specified permission. This value is
restricted to modes that can be applied using the python
C(os.chmod) function.
- The mode can be provided as an octal such as V("0644") or
as symbolic such as V(u=rw,g=r,o=r).
type: raw
version_added: '2.16'
exact_mode:
description:
- Restrict mode matching to exact matches only, and not as a
minimum set of permissions to match.
type: bool
default: true
version_added: '2.16'
follow:
description:
- Set this to V(true) to follow symlinks in path for systems with python 2.6+.
type: bool
default: no
use_regex:
description:
- If V(false), the patterns are file globs (shell).
- If V(true), they are python regexes.
type: bool
default: no
depth:
description:
- Set the maximum number of levels to descend into.
    - Setting O(recurse=false) overrides this value, effectively limiting it to a depth of 1.
- Default is unlimited depth.
type: int
version_added: "2.6"
encoding:
description:
- When doing a O(contains) search, determine the encoding of the files to be searched.
type: str
version_added: "2.17"
limit:
description:
- Limit the maximum number of matching paths returned. After finding this many, the find action will stop looking.
- Matches are made from the top, down (i.e. shallowest directory first).
    - If not set, or set to V(null), it will do unlimited matches.
- Default is unlimited matches.
type: int
version_added: "2.18"
extends_documentation_fragment: [action_common_attributes, checksum_common]
attributes:
check_mode:
    details: since this action does not modify the target, it executes normally during check mode
support: full
diff_mode:
support: none
platform:
platforms: posix
seealso:
- module: ansible.windows.win_find
"""
EXAMPLES = r"""
- name: Recursively find /tmp files older than 2 days
ansible.builtin.find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
ansible.builtin.find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
ansible.builtin.find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
ansible.builtin.find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but YAML single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
ansible.builtin.find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
- name: Find /var/log all directories, exclude nginx and mysql
ansible.builtin.find:
paths: /var/log
recurse: no
file_type: directory
excludes: 'nginx,mysql'
# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
- name: Use a single pattern that contains a comma formatted as a list
ansible.builtin.find:
paths: /var/log
file_type: file
use_regex: yes
patterns: ['^_[0-9]{2,4}_.*.log$']
- name: Use multiple patterns that contain a comma formatted as a YAML list
ansible.builtin.find:
paths: /var/log
file_type: file
use_regex: yes
patterns:
- '^_[0-9]{2,4}_.*.log$'
- '^[a-z]{1,5}_.*log$'
- name: Find file containing "wally" without necessarily reading all files
ansible.builtin.find:
paths: /var/log
file_type: file
contains: wally
read_whole_file: true
patterns: "^.*\\.log$"
use_regex: true
recurse: true
limit: 1
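# The following example is illustrative and not part of the original module documentation.
- name: Find regular files in /etc with the exact mode 0644
  ansible.builtin.find:
    paths: /etc
    file_type: file
    mode: '0644'
    exact_mode: true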
"""
RETURN = r"""
files:
description: All matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: Number of matches
returned: success
type: int
sample: 14
examined:
description: Number of filesystem objects looked at
returned: success
type: int
sample: 34
skipped_paths:
description: skipped paths and reasons they were skipped
returned: success
type: dict
sample: {"/laskdfj": "'/laskdfj' is not a directory"}
version_added: '2.12'
"""
import errno
import fnmatch
import grp
import os
import pwd
import re
import stat
import time
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
class _Object:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def pfilter(f, patterns=None, excludes=None, use_regex=False):
"""filter using glob patterns"""
if not patterns and not excludes:
return True
if use_regex:
if patterns and not excludes:
for p in patterns:
r = re.compile(p)
if r.match(f):
return True
elif patterns and excludes:
for p in patterns:
r = re.compile(p)
if r.match(f):
for e in excludes:
r = re.compile(e)
if r.match(f):
return False
return True
else:
if patterns and not excludes:
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
elif patterns and excludes:
for p in patterns:
if fnmatch.fnmatch(f, p):
for e in excludes:
if fnmatch.fnmatch(f, e):
return False
return True
return False
def agefilter(st, now, age, timestamp):
"""filter files older than age"""
if age is None:
return True
elif age >= 0 and now - getattr(st, "st_%s" % timestamp) >= abs(age):
return True
elif age < 0 and now - getattr(st, "st_%s" % timestamp) <= abs(age):
return True
return False
def sizefilter(st, size):
"""filter files greater than size"""
if size is None:
return True
elif size >= 0 and st.st_size >= abs(size):
return True
elif size < 0 and st.st_size <= abs(size):
return True
return False
def contentfilter(fsname, pattern, encoding, read_whole_file=False):
"""
Filter files which contain the given expression
:arg fsname: Filename to scan for lines matching a pattern
:arg pattern: Pattern to look for inside of line
:arg encoding: Encoding of the file to be scanned
:arg read_whole_file: If true, the whole file is read into memory before the regex is applied against it. Otherwise, the regex is applied line-by-line.
:rtype: bool
:returns: True if one of the lines in fsname matches the pattern. Otherwise False
"""
if pattern is None:
return True
prog = re.compile(pattern)
try:
with open(fsname, encoding=encoding) as f:
if read_whole_file:
return bool(prog.search(f.read()))
for line in f:
if prog.match(line):
return True
    except LookupError:
        raise
except UnicodeDecodeError as e:
if encoding is None:
encoding = 'None (default determined by the Python built-in function "open")'
msg = f'Failed to read the file {fsname} due to an encoding error. current encoding: {encoding}'
raise Exception(msg) from e
except Exception:
pass
return False
def mode_filter(st, mode, exact, module):
if not mode:
return True
st_mode = stat.S_IMODE(st.st_mode)
try:
mode = int(mode, 8)
except ValueError:
mode = module._symbolic_mode_to_octal(_Object(st_mode=0), mode)
mode = stat.S_IMODE(mode)
if exact:
return st_mode == mode
return bool(st_mode & mode)
def statinfo(st):
pw_name = ""
gr_name = ""
try: # user data
pw_name = pwd.getpwuid(st.st_uid).pw_name
except Exception:
pass
try: # group data
gr_name = grp.getgrgid(st.st_gid).gr_name
except Exception:
pass
return {
'mode': "%04o" % stat.S_IMODE(st.st_mode),
'isdir': stat.S_ISDIR(st.st_mode),
'ischr': stat.S_ISCHR(st.st_mode),
'isblk': stat.S_ISBLK(st.st_mode),
'isreg': stat.S_ISREG(st.st_mode),
'isfifo': stat.S_ISFIFO(st.st_mode),
'islnk': stat.S_ISLNK(st.st_mode),
'issock': stat.S_ISSOCK(st.st_mode),
'uid': st.st_uid,
'gid': st.st_gid,
'size': st.st_size,
'inode': st.st_ino,
'dev': st.st_dev,
'nlink': st.st_nlink,
'atime': st.st_atime,
'mtime': st.st_mtime,
'ctime': st.st_ctime,
'gr_name': gr_name,
'pw_name': pw_name,
'wusr': bool(st.st_mode & stat.S_IWUSR),
'rusr': bool(st.st_mode & stat.S_IRUSR),
'xusr': bool(st.st_mode & stat.S_IXUSR),
'wgrp': bool(st.st_mode & stat.S_IWGRP),
'rgrp': bool(st.st_mode & stat.S_IRGRP),
'xgrp': bool(st.st_mode & stat.S_IXGRP),
'woth': bool(st.st_mode & stat.S_IWOTH),
'roth': bool(st.st_mode & stat.S_IROTH),
'xoth': bool(st.st_mode & stat.S_IXOTH),
'isuid': bool(st.st_mode & stat.S_ISUID),
'isgid': bool(st.st_mode & stat.S_ISGID),
}
def main():
module = AnsibleModule(
argument_spec=dict(
paths=dict(type='list', required=True, aliases=['name', 'path'], elements='path'),
patterns=dict(type='list', default=[], aliases=['pattern'], elements='str'),
excludes=dict(type='list', aliases=['exclude'], elements='str'),
contains=dict(type='str'),
read_whole_file=dict(type='bool', default=False),
file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
age=dict(type='str'),
age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
size=dict(type='str'),
recurse=dict(type='bool', default=False),
hidden=dict(type='bool', default=False),
follow=dict(type='bool', default=False),
get_checksum=dict(type='bool', default=False),
checksum_algorithm=dict(type='str', default='sha1',
choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
aliases=['checksum', 'checksum_algo']),
use_regex=dict(type='bool', default=False),
depth=dict(type='int'),
mode=dict(type='raw'),
exact_mode=dict(type='bool', default=True),
encoding=dict(type='str'),
limit=dict(type='int')
),
supports_check_mode=True,
)
params = module.params
if params['mode'] and not isinstance(params['mode'], string_types):
module.fail_json(
msg="argument 'mode' is not a string and conversion is not allowed, value is of type %s" % params['mode'].__class__.__name__
)
# Set the default match pattern to either a match-all glob or
# regex depending on use_regex being set. This makes sure if you
# set excludes: without a pattern pfilter gets something it can
# handle.
if not params['patterns']:
if params['use_regex']:
params['patterns'] = ['.*']
else:
params['patterns'] = ['*']
filelist = []
skipped = {}
def handle_walk_errors(e):
if e.errno in (errno.EPERM, errno.EACCES):
skipped[e.filename] = to_text(e)
return
raise e
if params['age'] is None:
age = None
else:
# convert age to seconds:
m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
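        # e.g. "2d" -> 172800 seconds; "-30m" -> -1800; a bare "3600" is taken as seconds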
if m:
age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
else:
module.fail_json(age=params['age'], msg="failed to process age")
if params['size'] is None:
size = None
else:
# convert size to bytes:
m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
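        # e.g. "10m" -> 10485760 bytes; "-1k" -> -1024; a bare "512" is taken as bytes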
if m:
size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
else:
module.fail_json(size=params['size'], msg="failed to process size")
if params['limit'] is not None and params['limit'] <= 0:
module.fail_json(msg="limit cannot be %d (use None for unlimited)" % params['limit'])
now = time.time()
msg = 'All paths examined'
looked = 0
has_warnings = False
for npath in params['paths']:
try:
if not os.path.isdir(npath):
raise Exception("'%s' is not a directory" % to_native(npath))
# Setting `topdown=True` to explicitly guarantee matches are made from the shallowest directory first
for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow'], topdown=True):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname = os.path.normpath(os.path.join(root, fsobj))
if params['depth']:
wpath = npath.rstrip(os.path.sep) + os.path.sep
depth = int(fsname.count(os.path.sep)) - int(wpath.count(os.path.sep)) + 1
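                        # e.g. npath='/var/log' and fsname='/var/log/app/x.log' give depth 2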
if depth > params['depth']:
# Empty the list used by os.walk to avoid traversing deeper unnecessarily
del dirs[:]
continue
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
try:
st = os.lstat(fsname)
except (IOError, OSError) as e:
module.warn("Skipped entry '%s' due to this access issue: %s\n" % (fsname, to_text(e)))
skipped[fsname] = to_text(e)
has_warnings = True
continue
r = {'path': fsname}
if params['file_type'] == 'any':
if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and
agefilter(st, now, age, params['age_stamp']) and
mode_filter(st, params['mode'], params['exact_mode'], module)):
r.update(statinfo(st))
if stat.S_ISREG(st.st_mode) and params['get_checksum']:
r['checksum'] = module.digest_from_file(fsname, params['checksum_algorithm'])
if stat.S_ISREG(st.st_mode):
if sizefilter(st, size):
filelist.append(r)
else:
filelist.append(r)
elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and
agefilter(st, now, age, params['age_stamp']) and
mode_filter(st, params['mode'], params['exact_mode'], module)):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and
agefilter(st, now, age, params['age_stamp']) and
sizefilter(st, size) and
contentfilter(fsname, params['contains'], params['encoding'], params['read_whole_file']) and
mode_filter(st, params['mode'], params['exact_mode'], module)):
r.update(statinfo(st))
if params['get_checksum']:
r['checksum'] = module.digest_from_file(fsname, params['checksum_algorithm'])
filelist.append(r)
elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and
agefilter(st, now, age, params['age_stamp']) and
mode_filter(st, params['mode'], params['exact_mode'], module)):
r.update(statinfo(st))
filelist.append(r)
if len(filelist) == params["limit"]:
# Breaks out of directory files loop only
msg = "Limit of matches reached"
break
if not params['recurse'] or len(filelist) == params["limit"]:
break
except Exception as e:
skipped[npath] = to_text(e)
module.warn("Skipped '%s' path due to this access issue: %s\n" % (to_text(npath), skipped[npath]))
has_warnings = True
if has_warnings:
msg = 'Not all paths examined, check warnings for details'
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked, skipped_paths=skipped)
if __name__ == '__main__':
main()
| 24,037 | Python | .py | 567 | 32.102293 | 155 | 0.580875 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,199 | unarchive.py | ansible_ansible/lib/ansible/modules/unarchive.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
# Copyright: (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2016, Dag Wieers <dag@wieers.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
---
module: unarchive
version_added: '1.4'
short_description: Unpacks an archive after (optionally) copying it from the local machine
description:
- The M(ansible.builtin.unarchive) module unpacks an archive. It will not unpack a compressed file that does not contain an archive.
- By default, it will copy the source file from the local system to the target before unpacking.
- Set O(remote_src=yes) to unpack an archive which already exists on the target.
- If checksum validation is desired, use M(ansible.builtin.get_url) or M(ansible.builtin.uri) instead to fetch the file and set O(remote_src=yes).
- For Windows targets, use the M(community.windows.win_unzip) module instead.
options:
src:
description:
- If O(remote_src=no) (default), local path to archive file to copy to the target server; can be absolute or relative. If O(remote_src=yes), path on the
target server to existing archive file to unpack.
- If O(remote_src=yes) and O(src) contains V(://), the remote machine will download the file from the URL first. (version_added 2.0). This is only for
simple cases, for full download support use the M(ansible.builtin.get_url) module.
type: path
required: true
dest:
description:
- Remote absolute path where the archive should be unpacked.
- The given path must exist. Base directory is not created by this module.
type: path
required: true
copy:
description:
    - If V(true), the file is copied from the local controller to the managed (remote) node; otherwise, the plugin will look for the src archive on the managed machine.
- This option has been deprecated in favor of O(remote_src).
- This option is mutually exclusive with O(remote_src).
type: bool
default: yes
creates:
description:
- If the specified absolute path (file or directory) already exists, this step will B(not) be run.
- The specified absolute path (file or directory) must be below the base path given with O(dest).
type: path
version_added: "1.6"
io_buffer_size:
description:
- Size of the volatile memory buffer that is used for extracting files from the archive in bytes.
type: int
default: 65536
version_added: "2.12"
list_files:
description:
- If set to True, return the list of files that are contained in the tarball.
type: bool
default: no
version_added: "2.0"
exclude:
description:
- List the directory and file entries that you would like to exclude from the unarchive action.
- Mutually exclusive with O(include).
type: list
default: []
elements: str
version_added: "2.1"
include:
description:
- List of directory and file entries that you would like to extract from the archive. If O(include)
is not empty, only files listed here will be extracted.
- Mutually exclusive with O(exclude).
type: list
default: []
elements: str
version_added: "2.11"
keep_newer:
description:
- Do not replace existing files that are newer than files from the archive.
type: bool
default: no
version_added: "2.1"
extra_opts:
description:
- Specify additional options by passing in an array.
- Each space-separated command-line option should be a new element of the array. See examples.
- Command-line options with multiple elements must use multiple lines in the array, one for each element.
type: list
elements: str
default: []
version_added: "2.1"
remote_src:
description:
- Set to V(true) to indicate the archived file is already on the remote system and not local to the Ansible controller.
- This option is mutually exclusive with O(copy).
type: bool
default: no
version_added: "2.2"
validate_certs:
description:
- This only applies if using a https URL as the source of the file.
    - This should only be set to V(false) on personally controlled sites using a self-signed certificate.
- Prior to 2.2 the code worked as if this was set to V(true).
type: bool
default: yes
version_added: "2.2"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
- action_common_attributes.files
- decrypt
- files
attributes:
action:
support: full
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: partial
details: Not supported for gzipped tar files.
diff_mode:
support: partial
    details: Uses gtar's C(--diff) argument to determine whether the content changed. If that argument is not supported, the archive is always unpacked.
platform:
platforms: posix
safe_file_operations:
support: none
vault:
support: full
todo:
- Re-implement tar support using native tarfile module.
- Re-implement zip support using native zipfile module.
notes:
- Requires C(zipinfo) and C(gtar)/C(unzip) command on target host.
- Requires C(zstd) command on target host to expand I(.tar.zst) files.
- Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2), I(.tar.xz), and I(.tar.zst) files using C(gtar).
- Does not handle I(.gz) files, I(.bz2) files, I(.xz), or I(.zst) files that do not contain a I(.tar) archive.
- Existing files/directories in the destination which are not in the archive
are not touched. This is the same behavior as a normal archive extraction.
- Existing files/directories in the destination which are not in the archive
are ignored for purposes of deciding if the archive should be unpacked or not.
seealso:
- module: community.general.archive
- module: community.general.iso_extract
- module: community.windows.win_unzip
author: Michael DeHaan
"""
EXAMPLES = r"""
- name: Extract foo.tgz into /var/lib/foo
ansible.builtin.unarchive:
src: foo.tgz
dest: /var/lib/foo
- name: Unarchive a file that is already on the remote machine
ansible.builtin.unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
remote_src: yes
- name: Unarchive a file that needs to be downloaded (added in 2.0)
ansible.builtin.unarchive:
src: https://example.com/example.zip
dest: /usr/local/bin
remote_src: yes
- name: Unarchive a file with extra options
ansible.builtin.unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
extra_opts:
- --transform
- s/^xxx/yyy/
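# The following example is illustrative and not part of the original module documentation.
- name: Extract only selected entries from an archive already on the remote host
  ansible.builtin.unarchive:
    src: /tmp/foo.tar.gz
    dest: /usr/local/etc
    remote_src: yes
    include:
      - etc/app.conf
      - etc/defaults.conf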
"""
RETURN = r"""
dest:
description: Path to the destination directory.
returned: always
type: str
sample: /opt/software
files:
description: List of all the files in the archive.
returned: When O(list_files) is V(True)
type: list
sample: '["file1", "file2"]'
gid:
description: Numerical ID of the group that owns the destination directory.
returned: always
type: int
sample: 1000
group:
description: Name of the group that owns the destination directory.
returned: always
type: str
sample: "librarians"
handler:
description: Archive software handler used to extract and decompress the archive.
returned: always
type: str
sample: "TgzArchive"
mode:
description: String that represents the octal permissions of the destination directory.
returned: always
type: str
sample: "0755"
owner:
description: Name of the user that owns the destination directory.
returned: always
type: str
sample: "paul"
size:
description: The size of destination directory in bytes. Does not include the size of files or subdirectories contained within.
returned: always
type: int
sample: 36
src:
description:
- The source archive's path.
    - If O(src) was a remote web URL, or was copied from the local Ansible controller, this shows the temporary location where the file was stored.
returned: always
type: str
sample: "/home/paul/test.tar.gz"
state:
description: State of the destination. Effectively always "directory".
returned: always
type: str
sample: "directory"
uid:
description: Numerical ID of the user that owns the destination directory.
returned: always
type: int
sample: 1000
"""
import binascii
import codecs
import fnmatch
import grp
import os
import platform
import pwd
import re
import stat
import time
import traceback
from functools import partial
from zipfile import ZipFile
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.urls import fetch_file
from shlex import quote
from zipfile import BadZipFile
# String from tar that shows the tar contents are different from the
# filesystem
OWNER_DIFF_RE = re.compile(r': Uid differs$')
GROUP_DIFF_RE = re.compile(r': Gid differs$')
MODE_DIFF_RE = re.compile(r': Mode differs$')
MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
# NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
INVALID_OWNER_RE = re.compile(r': Invalid owner')
INVALID_GROUP_RE = re.compile(r': Invalid group')
SYMLINK_DIFF_RE = re.compile(r': Symlink differs$')
CONTENT_DIFF_RE = re.compile(r': Contents differ$')
SIZE_DIFF_RE = re.compile(r': Size differs$')
def crc32(path, buffer_size):
""" Return a CRC32 checksum of a file """
crc = binascii.crc32(b'')
with open(path, 'rb') as f:
for b_block in iter(partial(f.read, buffer_size), b''):
crc = binascii.crc32(b_block, crc)
return crc & 0xffffffff
def shell_escape(string):
""" Quote meta-characters in the args for the unix shell """
return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string)
class UnarchiveError(Exception):
pass
class ZipArchive(object):
def __init__(self, src, b_dest, file_args, module):
self.src = src
self.b_dest = b_dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
self.io_buffer_size = module.params["io_buffer_size"]
self.excludes = module.params['exclude']
self.includes = []
self.include_files = self.module.params['include']
self.cmd_path = None
self.zipinfo_cmd_path = None
self._files_in_archive = []
self._infodict = dict()
self.zipinfoflag = ''
self.binaries = (
('unzip', 'cmd_path'),
('zipinfo', 'zipinfo_cmd_path'),
)
def _permstr_to_octal(self, modestr, umask):
""" Convert a Unix permission string (rw-r--r--) into a mode (0644) """
revstr = modestr[::-1]
mode = 0
for j in range(0, 3):
for i in range(0, 3):
if revstr[i + 3 * j] in ['r', 'w', 'x', 's', 't']:
mode += 2 ** (i + 3 * j)
# The unzip utility does not support setting the stST bits
# if revstr[i + 3 * j] in ['s', 't', 'S', 'T' ]:
# mode += 2 ** (9 + j)
return (mode & ~umask)
def _legacy_file_list(self):
rc, out, err = self.module.run_command([self.cmd_path, '-v', self.src])
if rc:
self.module.debug(err)
raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
for line in out.splitlines()[3:-2]:
fields = line.split(None, 7)
self._files_in_archive.append(fields[7])
self._infodict[fields[7]] = int(fields[6])
def _crc32(self, path):
if self._infodict:
return self._infodict[path]
try:
archive = ZipFile(self.src)
except BadZipFile as e:
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list()
else:
raise
else:
try:
for item in archive.infolist():
self._infodict[item.filename] = int(item.CRC)
except Exception:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
return self._infodict[path]
@property
def files_in_archive(self):
if self._files_in_archive:
return self._files_in_archive
self._files_in_archive = []
try:
archive = ZipFile(self.src)
except BadZipFile as e:
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list()
else:
raise
else:
try:
for member in archive.namelist():
if self.include_files:
for include in self.include_files:
if fnmatch.fnmatch(member, include):
self._files_in_archive.append(to_native(member))
else:
exclude_flag = False
if self.excludes:
for exclude in self.excludes:
if fnmatch.fnmatch(member, exclude):
exclude_flag = True
break
if not exclude_flag:
self._files_in_archive.append(to_native(member))
except Exception as e:
archive.close()
raise UnarchiveError('Unable to list files in the archive: %s' % to_native(e))
archive.close()
return self._files_in_archive
def _valid_time_stamp(self, timestamp_str):
""" Return a valid time object from the given time string """
DT_RE = re.compile(r'^(\d{4})(\d{2})(\d{2})\.(\d{2})(\d{2})(\d{2})$')
match = DT_RE.match(timestamp_str)
epoch_date_time = (1980, 1, 1, 0, 0, 0, 0, 0, 0)
if match:
try:
if int(match.groups()[0]) < 1980:
date_time = epoch_date_time
elif int(match.groups()[0]) > 2107:
date_time = (2107, 12, 31, 23, 59, 59, 0, 0, 0)
else:
date_time = (int(m) for m in match.groups() + (0, 0, 0))
except ValueError:
date_time = epoch_date_time
else:
# Assume epoch date
date_time = epoch_date_time
return time.mktime(time.struct_time(date_time))
def is_unarchived(self):
# BSD unzip doesn't support zipinfo listings with timestamp.
if self.zipinfoflag:
cmd = [self.zipinfo_cmd_path, self.zipinfoflag, '-T', '-s', self.src]
else:
cmd = [self.zipinfo_cmd_path, '-T', '-s', self.src]
if self.excludes:
cmd.extend(['-x', ] + self.excludes)
if self.include_files:
cmd.extend(self.include_files)
rc, out, err = self.module.run_command(cmd)
self.module.debug(err)
old_out = out
diff = ''
out = ''
if rc == 0:
unarchived = True
else:
unarchived = False
# Get some information related to user/group ownership
umask = os.umask(0)
os.umask(umask)
systemtype = platform.system()
# Get current user and group information
groups = os.getgroups()
run_uid = os.getuid()
run_gid = os.getgid()
try:
run_owner = pwd.getpwuid(run_uid).pw_name
except (TypeError, KeyError):
run_owner = run_uid
try:
run_group = grp.getgrgid(run_gid).gr_name
except (KeyError, ValueError, OverflowError):
run_group = run_gid
# Get future user ownership
fut_owner = fut_uid = None
if self.file_args['owner']:
try:
tpw = pwd.getpwnam(self.file_args['owner'])
except KeyError:
try:
tpw = pwd.getpwuid(int(self.file_args['owner']))
except (TypeError, KeyError, ValueError):
tpw = pwd.getpwuid(run_uid)
fut_owner = tpw.pw_name
fut_uid = tpw.pw_uid
else:
try:
fut_owner = run_owner
except Exception:
pass
fut_uid = run_uid
# Get future group ownership
fut_group = fut_gid = None
if self.file_args['group']:
try:
tgr = grp.getgrnam(self.file_args['group'])
except (ValueError, KeyError):
try:
# no need to check isdigit() explicitly here, if we fail to
# parse, the ValueError will be caught.
tgr = grp.getgrgid(int(self.file_args['group']))
except (KeyError, ValueError, OverflowError):
tgr = grp.getgrgid(run_gid)
fut_group = tgr.gr_name
fut_gid = tgr.gr_gid
else:
try:
fut_group = run_group
except Exception:
pass
fut_gid = run_gid
for line in old_out.splitlines():
change = False
pcs = line.split(None, 7)
if len(pcs) != 8:
# Too few fields... probably a piece of the header or footer
continue
# Check first and seventh field in order to skip header/footer
# 7 or 8 are FAT, 10 is normal unix perms
if len(pcs[0]) not in (7, 8, 10):
continue
if len(pcs[6]) != 15:
continue
# Possible entries:
# -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
# -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
# -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
# --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
continue
ztype = pcs[0][0]
permstr = pcs[0][1:]
version = pcs[1]
ostype = pcs[2]
size = int(pcs[3])
path = to_text(pcs[7], errors='surrogate_or_strict')
# Skip excluded files
if path in self.excludes:
out += 'Path %s is excluded on request\n' % path
continue
# Itemized change requires L for symlink
if path[-1] == '/':
if ztype != 'd':
err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
ftype = 'd'
elif ztype == 'l':
ftype = 'L'
            elif ztype in ('-', '?'):
                ftype = 'f'
# Some files may be storing FAT permissions, not Unix permissions
# For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set. Otherwise, 666.
# This permission will then be modified by the system UMask.
# BSD always applies the Umask, even to Unix permissions.
# For Unix style permissions on Linux or Mac, we want to use them directly.
# So we set the UMask for this file to zero. That permission set will then be unchanged when calling _permstr_to_octal
if len(permstr) == 6:
if path[-1] == '/':
permstr = 'rwxrwxrwx'
elif permstr == 'rwx---':
permstr = 'rwxrwxrwx'
else:
permstr = 'rw-rw-rw-'
file_umask = umask
elif len(permstr) == 7:
if permstr == 'rwxa---':
permstr = 'rwxrwxrwx'
else:
permstr = 'rw-rw-rw-'
file_umask = umask
elif 'bsd' in systemtype.lower():
file_umask = umask
else:
file_umask = 0
# Test string conformity
if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
# DEBUG
# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
b_dest = os.path.join(self.b_dest, to_bytes(path, errors='surrogate_or_strict'))
try:
st = os.lstat(b_dest)
except Exception:
change = True
self.includes.append(path)
err += 'Path %s is missing\n' % path
diff += '>%s++++++.?? %s\n' % (ftype, path)
continue
# Compare file types
if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
change = True
self.includes.append(path)
err += 'File %s already exists, but not as a directory\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'f' and not stat.S_ISREG(st.st_mode):
change = True
unarchived = False
self.includes.append(path)
err += 'Directory %s already exists, but not as a regular file\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
change = True
self.includes.append(path)
err += 'Directory %s already exists, but not as a symlink\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
itemized = list('.%s.......??' % ftype)
# Note: this timestamp calculation has a rounding error
# somewhere... unzip and this timestamp can be one second off
# When that happens, we report a change and re-unzip the file
timestamp = self._valid_time_stamp(pcs[6])
# Compare file timestamps
if stat.S_ISREG(st.st_mode):
if self.module.params['keep_newer']:
if timestamp > st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s is older, replacing file\n' % path
itemized[4] = 't'
elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
# Add to excluded files, ignore other changes
out += 'File %s is newer, excluding file\n' % path
self.excludes.append(path)
continue
else:
if timestamp != st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
itemized[4] = 't'
# Compare file sizes
if stat.S_ISREG(st.st_mode) and size != st.st_size:
change = True
err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
itemized[3] = 's'
# Compare file checksums
if stat.S_ISREG(st.st_mode):
crc = crc32(b_dest, self.io_buffer_size)
if crc != self._crc32(path):
change = True
err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
itemized[2] = 'c'
# Compare file permissions
# Do not handle permissions of symlinks
if ftype != 'L':
# Use the new mode provided with the action, if there is one
if self.file_args['mode']:
if isinstance(self.file_args['mode'], int):
mode = self.file_args['mode']
else:
try:
mode = int(self.file_args['mode'], 8)
                        except Exception:
try:
mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
except ValueError as e:
self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())
# Only special files require no umask-handling
elif ztype == '?':
mode = self._permstr_to_octal(permstr, 0)
else:
mode = self._permstr_to_octal(permstr, file_umask)
if mode != stat.S_IMODE(st.st_mode):
change = True
itemized[5] = 'p'
err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
# Compare file user ownership
owner = uid = None
try:
owner = pwd.getpwuid(st.st_uid).pw_name
except (TypeError, KeyError):
uid = st.st_uid
# If we are not root and requested owner is not our user, fail
if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
if owner and owner != fut_owner:
change = True
err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
itemized[6] = 'o'
elif uid and uid != fut_uid:
change = True
err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
itemized[6] = 'o'
# Compare file group ownership
group = gid = None
try:
group = grp.getgrgid(st.st_gid).gr_name
except (KeyError, ValueError, OverflowError):
gid = st.st_gid
if run_uid != 0 and (fut_group != run_group or fut_gid != run_gid) and fut_gid not in groups:
raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
if group and group != fut_group:
change = True
err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
itemized[6] = 'g'
elif gid and gid != fut_gid:
change = True
err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
itemized[6] = 'g'
# Register changed files and finalize diff output
if change:
if path not in self.includes:
self.includes.append(path)
diff += '%s %s\n' % (''.join(itemized), path)
if self.includes:
unarchived = False
# DEBUG
# out = old_out + out
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
def unarchive(self):
cmd = [self.cmd_path, '-o']
if self.opts:
cmd.extend(self.opts)
cmd.append(self.src)
# NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
# if self.includes:
# NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
# cmd.extend(map(shell_escape, self.includes))
if self.excludes:
cmd.extend(['-x'] + self.excludes)
if self.include_files:
cmd.extend(self.include_files)
cmd.extend(['-d', self.b_dest])
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
missing = []
for b in self.binaries:
try:
setattr(self, b[1], get_bin_path(b[0]))
except ValueError:
missing.append(b[0])
if missing:
return False, "Unable to find required '{missing}' binary in the path.".format(missing="' or '".join(missing))
cmd = [self.cmd_path, '-l', self.src]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return True, None
self.module.debug(err)
return False, 'Command "%s" could not handle archive: %s' % (self.cmd_path, err)
class TgzArchive(object):
def __init__(self, src, b_dest, file_args, module):
self.src = src
self.b_dest = b_dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
if self.module.check_mode:
self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
self.include_files = self.module.params['include']
self.cmd_path = None
self.tar_type = None
self.zipflag = '-z'
self._files_in_archive = []
def _get_tar_type(self):
cmd = [self.cmd_path, '--version']
(rc, out, err) = self.module.run_command(cmd)
tar_type = None
if out.startswith('bsdtar'):
tar_type = 'bsd'
elif out.startswith('tar') and 'GNU' in out:
tar_type = 'gnu'
return tar_type
@property
def files_in_archive(self):
if self._files_in_archive:
return self._files_in_archive
cmd = [self.cmd_path, '--list', '-C', self.b_dest]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend(['--show-transformed-names'] + self.opts)
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
if self.include_files:
cmd.extend(self.include_files)
locale = get_best_parsable_locale(self.module)
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
if rc != 0:
self.module.debug(err)
raise UnarchiveError('Unable to list files in the archive: %s' % err)
for filename in out.splitlines():
# Compensate for locale-related problems in gtar output (octal unicode representation) #11348
# filename = filename.decode('string_escape')
filename = to_native(codecs.escape_decode(filename)[0])
# We don't allow absolute filenames. If the user wants to unarchive rooted in "/"
# they need to use "dest: '/'". This follows the defaults for gtar, pax, etc.
# Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
if filename.startswith('/'):
filename = filename[1:]
exclude_flag = False
if self.excludes:
for exclude in self.excludes:
if fnmatch.fnmatch(filename, exclude):
exclude_flag = True
break
if not exclude_flag:
self._files_in_archive.append(to_native(filename))
return self._files_in_archive
def is_unarchived(self):
cmd = [self.cmd_path, '--diff', '-C', self.b_dest]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend(['--show-transformed-names'] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
if self.include_files:
cmd.extend(self.include_files)
locale = get_best_parsable_locale(self.module)
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
# Check whether the differences are in something that we're
# setting anyway
# What is different
unarchived = True
old_out = out
out = ''
run_uid = os.getuid()
# When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
# Only way to be sure is to check request with what is on disk (as we do for zip)
# Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
for line in old_out.splitlines() + err.splitlines():
# FIXME: Remove the bogus lines from error-output as well !
# Ignore bogus errors on empty filenames (when using --split-component)
if EMPTY_FILE_RE.search(line):
continue
if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
out += line + '\n'
if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
out += line + '\n'
if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
out += line + '\n'
differ_regexes = [
MOD_TIME_DIFF_RE, MISSING_FILE_RE, INVALID_OWNER_RE,
INVALID_GROUP_RE, SYMLINK_DIFF_RE, CONTENT_DIFF_RE,
SIZE_DIFF_RE
]
for regex in differ_regexes:
if regex.search(line):
out += line + '\n'
if out:
unarchived = False
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
def unarchive(self):
cmd = [self.cmd_path, '--extract', '-C', self.b_dest]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend(['--show-transformed-names'] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
if self.include_files:
cmd.extend(self.include_files)
locale = get_best_parsable_locale(self.module)
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
# Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
try:
self.cmd_path = get_bin_path('gtar')
except ValueError:
# Fallback to tar
try:
self.cmd_path = get_bin_path('tar')
except ValueError:
return False, "Unable to find required 'gtar' or 'tar' binary in the path"
self.tar_type = self._get_tar_type()
if self.tar_type != 'gnu':
return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
try:
if self.files_in_archive:
return True, None
except UnarchiveError as e:
return False, 'Command "%s" could not handle archive: %s' % (self.cmd_path, to_native(e))
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
return False, 'Command "%s" found no files in archive. Empty archive files are not supported.' % self.cmd_path
# Class to handle tar files that aren't compressed
class TarArchive(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarArchive, self).__init__(src, b_dest, file_args, module)
# argument to tar
self.zipflag = ''
# Class to handle bzip2 compressed tar files
class TarBzipArchive(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarBzipArchive, self).__init__(src, b_dest, file_args, module)
self.zipflag = '-j'
# Class to handle xz compressed tar files
class TarXzArchive(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarXzArchive, self).__init__(src, b_dest, file_args, module)
self.zipflag = '-J'
# Class to handle zstd compressed tar files
class TarZstdArchive(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarZstdArchive, self).__init__(src, b_dest, file_args, module)
# GNU Tar supports the --use-compress-program option to
# specify which executable to use for
# compression/decompression.
#
# Note: some flavors of BSD tar support --zstd (e.g., FreeBSD
# 12.2), but the TgzArchive class only supports GNU Tar.
self.zipflag = '--use-compress-program=zstd'
class ZipZArchive(ZipArchive):
def __init__(self, src, b_dest, file_args, module):
super(ZipZArchive, self).__init__(src, b_dest, file_args, module)
        # NOTE: adds 'l', which is the default on most Linux builds of unzip, but not in all implementations
self.zipinfoflag = '-Zl'
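        # '-Z' switches unzip into zipinfo mode, producing the listing that
        # is_unarchived() parses for this handler.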
self.binaries = (
('unzip', 'cmd_path'),
('unzip', 'zipinfo_cmd_path'),
)
def can_handle_archive(self):
unzip_available, error_msg = super(ZipZArchive, self).can_handle_archive()
if not unzip_available:
return unzip_available, error_msg
        # Ensure unzip -Z is available before we use it in is_unarchived()
cmd = [self.zipinfo_cmd_path, self.zipinfoflag]
rc, out, err = self.module.run_command(cmd)
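        # When given no archive argument, a usable binary prints its zipinfo
        # usage text, which is what the check below looks for.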
if 'zipinfo' in out.lower():
return True, None
return False, 'Command "unzip -Z" could not handle archive: %s' % err
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
handlers = [ZipArchive, ZipZArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive, TarZstdArchive]
reasons = set()
for handler in handlers:
obj = handler(src, dest, file_args, module)
(can_handle, reason) = obj.can_handle_archive()
if can_handle:
return obj
reasons.add(reason)
reason_msg = '\n'.join(reasons)
module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.\n%s' % (src, reason_msg))
def main():
module = AnsibleModule(
        # Not validating everything here because the file arguments are daisy-chained to the file module
argument_spec=dict(
src=dict(type='path', required=True),
dest=dict(type='path', required=True),
remote_src=dict(type='bool', default=False),
creates=dict(type='path'),
list_files=dict(type='bool', default=False),
keep_newer=dict(type='bool', default=False),
exclude=dict(type='list', elements='str', default=[]),
include=dict(type='list', elements='str', default=[]),
extra_opts=dict(type='list', elements='str', default=[]),
validate_certs=dict(type='bool', default=True),
io_buffer_size=dict(type='int', default=64 * 1024),
# Options that are for the action plugin, but ignored by the module itself.
            # We have them here so that the sanity tests pass without ignores, which
            # reduces the likelihood of further bugs being added.
copy=dict(type='bool', default=True),
decrypt=dict(type='bool', default=True),
),
add_file_common_args=True,
        # check mode only fully works for zip files; we handle that below
supports_check_mode=True,
mutually_exclusive=[('include', 'exclude')],
)
src = module.params['src']
dest = module.params['dest']
abs_dest = os.path.abspath(dest)
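    # Use a byte path for dest so that filesystem operations handle non-ASCII
    # filenames consistently.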
b_dest = to_bytes(abs_dest, errors='surrogate_or_strict')
if not os.path.isabs(dest):
module.warn("Relative destination path '{dest}' was resolved to absolute path '{abs_dest}'.".format(dest=dest, abs_dest=abs_dest))
remote_src = module.params['remote_src']
file_args = module.load_file_common_arguments(module.params)
    # did the archive file arrive?
if not os.path.exists(src):
if not remote_src:
module.fail_json(msg="Source '%s' failed to transfer" % src)
        # If remote_src=true and src contains '://', try to download the file to a temp directory.
elif '://' in src:
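            # fetch_file() downloads the URL to the module's temporary directory
            # and returns the path of the local copy.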
src = fetch_file(module, src)
else:
module.fail_json(msg="Source '%s' does not exist" % src)
if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src)
# ensure src is an absolute path before picking handlers
src = os.path.abspath(src)
# skip working with 0 size archives
try:
if os.path.getsize(src) == 0:
module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
except Exception as e:
module.fail_json(msg="Source '%s' not readable, %s" % (src, to_native(e)))
    # is dest a directory that can receive the archive?
if not os.path.isdir(b_dest):
module.fail_json(msg="Destination '%s' is not a directory" % dest)
handler = pick_handler(src, b_dest, file_args, module)
res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
    # do we need to unpack?
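    # is_unarchived() compares the archive against the destination; for zip it
    # can also return a 'diff' string, which is used further below.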
check_results = handler.is_unarchived()
# DEBUG
# res_args['check_results'] = check_results
if module.check_mode:
res_args['changed'] = not check_results['unarchived']
elif check_results['unarchived']:
res_args['changed'] = False
else:
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
else:
res_args['changed'] = True
# Get diff if required
if check_results.get('diff', False):
res_args['diff'] = {'prepared': check_results['diff']}
    # Apply file attributes only if we found differences (idempotence) or no diff information was available
if res_args.get('diff', True) and not module.check_mode:
# do we need to change perms?
top_folders = []
for filename in handler.files_in_archive:
file_args['path'] = os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))
try:
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
except (IOError, OSError) as e:
module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
if '/' in filename:
top_folder_path = filename.split('/')[0]
if top_folder_path not in top_folders:
top_folders.append(top_folder_path)
# make sure top folders have the right permissions
# https://github.com/ansible/ansible/issues/35426
if top_folders:
for f in top_folders:
file_args['path'] = "%s/%s" % (dest, f)
try:
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
except (IOError, OSError) as e:
module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
if module.params['list_files']:
res_args['files'] = handler.files_in_archive
module.exit_json(**res_args)
if __name__ == '__main__':
main()
| 45,405 | Python | .py | 1,016 | 34.471457 | 159 | 0.58545 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |