| Column | Type | Range / Values |
|---|---|---|
| id | int64 | 0 – 458k |
| file_name | string | lengths 4 – 119 |
| file_path | string | lengths 14 – 227 |
| content | string | lengths 24 – 9.96M |
| size | int64 | 24 – 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 – 219k |
| avg_line_length | float64 | 2.52 – 4.63M |
| max_line_length | int64 | 5 – 9.91M |
| alphanum_fraction | float64 | 0 – 1 |
| repo_name | string | lengths 7 – 101 |
| repo_stars | int64 | 100 – 139k |
| repo_forks | int64 | 0 – 26.4k |
| repo_open_issues | int64 | 0 – 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
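The schema above is a Hugging Face datasets-style viewer export; each row below is one source file plus repository metadata. A minimal sketch of consuming such an export with the `datasets` library follows; the dataset id used here is a hypothetical placeholder, since the actual id is not given in this dump:

```python
# Minimal sketch: stream rows of a code dataset with this schema and keep
# only Python files from high-star repos. The dataset id below is a
# hypothetical placeholder; substitute the actual export's id or local path.
from datasets import load_dataset

ds = load_dataset("user/python-source-dump", split="train", streaming=True)

for row in ds:
    # Each row carries file content plus repo-level metadata, per the schema above.
    if row["repo_stars"] >= 10_000 and row["extension"] == ".py":
        print(row["repo_name"], row["file_path"], row["total_lines"])
```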
| id: 14,400 | file_name: unarchive.py | file_path: ansible_ansible/lib/ansible/plugins/action/unarchive.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
""" handler for unarchive operations """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
creates = self._task.args.get('creates', None)
decrypt = self._task.args.get('decrypt', True)
try:
# "copy" is deprecated in favor of "remote_src".
if 'copy' in self._task.args:
# They are mutually exclusive.
if 'remote_src' in self._task.args:
raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
# We will take the information from copy and store it in
# the remote_src var to use later in this file.
self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)
if source is None or dest is None:
raise AnsibleActionFail("src (or content) and dest are required")
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
creates = self._remote_expand_user(creates)
if self._remote_file_exists(creates):
raise AnsibleActionSkip("skipped, since %s exists" % creates)
dest = self._remote_expand_user(dest) # CCTODO: Fix path for Windows hosts.
source = os.path.expanduser(source)
if not remote_src:
try:
source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
try:
remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
if not remote_stat['exists'] or not remote_stat['isdir']:
raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)
if not remote_src:
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
self._transfer_file(source, tmp_src)
# handle diff mode client side
# handle check mode client side
# remove action plugin only keys
new_module_args = self._task.args.copy()
for key in ('decrypt',):
if key in new_module_args:
del new_module_args[key]
if not remote_src:
# fix file permissions when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
new_module_args['src'] = tmp_src
            # execute the unarchive module now, with the updated args (using the ansible.legacy prefix to eliminate
            # collection collisions with local overrides)
result.update(self._execute_module(module_name='ansible.legacy.unarchive', module_args=new_module_args, task_vars=task_vars))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| size: 4,859 | language: Python | extension: .py | total_lines: 90 | avg_line_length: 43.255556 | max_line_length: 137 | alphanum_fraction: 0.637397 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
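The subtlest step in the plugin above is translating the deprecated `copy` argument into `remote_src`. Below is a standalone sketch of that normalization, with `to_bool` as a simplified stand-in for Ansible's `boolean()` helper (an assumption made for self-containment; the real helper lives in `ansible.module_utils.parsing.convert_bool`):

```python
# Standalone sketch of the copy -> remote_src normalization performed by the
# unarchive action plugin. `to_bool` is a simplified stand-in for
# ansible.module_utils.parsing.convert_bool.boolean().
def to_bool(value, strict=False):
    # Accept common truthy spellings; everything else is False when strict=False.
    return str(value).strip().lower() in ('y', 'yes', 'on', '1', 'true', 't', '1.0')

def normalize_args(args):
    if 'copy' in args:
        if 'remote_src' in args:
            raise ValueError("parameters are mutually exclusive: ('copy', 'remote_src')")
        # copy=True meant "copy from the controller", which is the opposite of remote_src.
        args['remote_src'] = not to_bool(args.pop('copy'))
    return args

assert normalize_args({'copy': 'no'})['remote_src'] is True
assert normalize_args({'copy': 'yes'})['remote_src'] is False
```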
| id: 14,401 | file_name: wait_for_connection.py | file_path: ansible_ansible/lib/ansible/plugins/action/wait_for_connection.py |
# (c) 2017, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# CI-required python3 boilerplate
from __future__ import annotations
import time
from datetime import datetime, timedelta, timezone
from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('connect_timeout', 'delay', 'sleep', 'timeout'))
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_DELAY = 0
DEFAULT_SLEEP = 1
DEFAULT_TIMEOUT = 600
def do_until_success_or_timeout(self, what, timeout, connect_timeout, what_desc, sleep=1):
max_end_time = datetime.now(timezone.utc) + timedelta(seconds=timeout)
        error = None  # last exception seen, reported if we time out
while datetime.now(timezone.utc) < max_end_time:
try:
what(connect_timeout)
if what_desc:
display.debug("wait_for_connection: %s success" % what_desc)
return
except Exception as e:
error = e # PY3 compatibility to store exception for use outside of this block
if what_desc:
display.debug("wait_for_connection: %s fail (expected), retrying in %d seconds..." % (what_desc, sleep))
time.sleep(sleep)
raise TimedOutException("timed out waiting for %s: %s" % (what_desc, error))
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
delay = int(self._task.args.get('delay', self.DEFAULT_DELAY))
sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP))
timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT))
if self._task.check_mode:
display.vvv("wait_for_connection: skipping for check_mode")
return dict(skipped=True)
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
def ping_module_test(connect_timeout):
""" Test ping module, if available """
display.vvv("wait_for_connection: attempting ping module test")
# re-run interpreter discovery if we ran it in the first iteration
if self._discovered_interpreter_key:
task_vars['ansible_facts'].pop(self._discovered_interpreter_key, None)
# call connection reset between runs if it's there
try:
self._connection.reset()
except AttributeError:
pass
ping_result = self._execute_module(module_name='ansible.legacy.ping', module_args=dict(), task_vars=task_vars)
# Test module output
if ping_result['ping'] != 'pong':
raise Exception('ping test failed')
start = datetime.now()
if delay:
time.sleep(delay)
try:
# If the connection has a transport_test method, use it first
if hasattr(self._connection, 'transport_test'):
self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout, what_desc="connection port up", sleep=sleep)
# Use the ping module test to determine end-to-end connectivity
self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout, what_desc="ping module test", sleep=sleep)
except TimedOutException as e:
result['failed'] = True
result['msg'] = to_text(e)
elapsed = datetime.now() - start
result['elapsed'] = elapsed.seconds
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| size: 4,564 | language: Python | extension: .py | total_lines: 92 | avg_line_length: 40.793478 | max_line_length: 152 | alphanum_fraction: 0.657143 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
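The `do_until_success_or_timeout` method above is a generic retry-with-deadline loop. Here is a self-contained sketch of the same pattern, detached from Ansible's connection machinery, with a toy flaky callable as the usage example:

```python
# Self-contained sketch of the retry-until-deadline pattern used by
# wait_for_connection: keep calling `what` until it succeeds or `timeout`
# seconds elapse, sleeping `sleep` seconds between attempts.
import time
from datetime import datetime, timedelta, timezone

class TimedOutException(Exception):
    pass

def do_until_success_or_timeout(what, timeout, sleep=1):
    deadline = datetime.now(timezone.utc) + timedelta(seconds=timeout)
    error = None
    while datetime.now(timezone.utc) < deadline:
        try:
            return what()
        except Exception as e:
            error = e  # remember the last failure for the timeout message
            time.sleep(sleep)
    raise TimedOutException("timed out: %s" % error)

# Usage: fails twice, then succeeds.
attempts = []
def flaky():
    attempts.append(1)
    if len(attempts) < 3:
        raise RuntimeError("not ready")
    return "ok"

print(do_until_success_or_timeout(flaky, timeout=10, sleep=0.1))  # -> ok
```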
| id: 14,402 | file_name: __init__.py | file_path: ansible_ansible/lib/ansible/plugins/httpapi/__init__.py |
# (c) 2018 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from abc import abstractmethod
from ansible.plugins import AnsiblePlugin
class HttpApiBase(AnsiblePlugin):
def __init__(self, connection):
super(HttpApiBase, self).__init__()
self.connection = connection
self._become = False
self._become_pass = ''
def set_become(self, become_context):
self._become = become_context.become
self._become_pass = getattr(become_context, 'become_pass') or ''
def login(self, username, password):
"""Call a defined login endpoint to receive an authentication token.
This should only be implemented if the API has a single endpoint which
can turn HTTP basic auth into a token which can be reused for the rest
of the calls for the session.
"""
pass
def logout(self):
""" Call to implement session logout.
Method to clear session gracefully e.g. tokens granted in login
need to be revoked.
"""
pass
def update_auth(self, response, response_text):
"""Return per-request auth token.
The response should be a dictionary that can be plugged into the
headers of a request. The default implementation uses cookie data.
If no authentication data is found, return None
"""
cookie = response.info().get('Set-Cookie')
if cookie:
return {'Cookie': cookie}
return None
def handle_httperror(self, exc):
"""Overridable method for dealing with HTTP codes.
This method will attempt to handle known cases of HTTP status codes.
If your API uses status codes to convey information in a regular way,
you can override this method to handle it appropriately.
:returns:
* True if the code has been handled in a way that the request
may be resent without changes.
* False if the error cannot be handled or recovered from by the
plugin. This will result in the HTTPError being raised as an
exception for the caller to deal with as appropriate (most likely
by failing).
* Any other value returned is taken as a valid response from the
server without making another request. In many cases, this can just
be the original exception.
"""
if exc.code == 401:
if self.connection._auth:
# Stored auth appears to be invalid, clear and retry
self.connection._auth = None
self.login(self.connection.get_option('remote_user'), self.connection.get_option('password'))
return True
else:
# Unauthorized and there's no token. Return an error
return False
return exc
@abstractmethod
def send_request(self, data, **message_kwargs):
"""Prepares and sends request(s) to device."""
pass
| size: 3,093 | language: Python | extension: .py | total_lines: 67 | avg_line_length: 36.58209 | max_line_length: 109 | alphanum_fraction: 0.643831 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
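The `update_auth`/`handle_httperror` contract above is easier to see in isolation. Below is a standalone mimic of the default cookie-based auth refresh and the 401 clear-and-retry flow; `FakeResponse` and `FakeConnection` are invented stand-ins for illustration, not Ansible APIs:

```python
# Standalone mimic of HttpApiBase's default auth behavior: update_auth turns a
# Set-Cookie header into a Cookie header for subsequent requests, and a 401
# clears stored auth, re-logs in, and asks the caller to retry once.
# FakeResponse/FakeConnection are invented stand-ins, not Ansible classes.
class FakeResponse:
    def __init__(self, headers):
        self._headers = headers
    def info(self):
        return self._headers  # dict-like; update_auth only calls .get()

def update_auth(response):
    cookie = response.info().get('Set-Cookie')
    if cookie:
        return {'Cookie': cookie}
    return None

def handle_401(connection):
    if connection.auth:
        connection.auth = None   # stored token looked invalid, clear it
        connection.login()       # try to obtain a fresh one
        return True              # safe to resend the request unchanged
    return False                 # no credentials to retry with

class FakeConnection:
    def __init__(self):
        self.auth = {'Cookie': 'stale'}
    def login(self):
        self.auth = {'Cookie': 'fresh'}

conn = FakeConnection()
print(update_auth(FakeResponse({'Set-Cookie': 'sid=abc'})))  # {'Cookie': 'sid=abc'}
print(handle_401(conn), conn.auth)                           # True {'Cookie': 'fresh'}
```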
| id: 14,403 | file_name: __init__.py | file_path: ansible_ansible/lib/ansible/plugins/cliconf/__init__.py |
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from abc import abstractmethod
from functools import wraps
from ansible.plugins import AnsiblePlugin
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_bytes, to_text
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
def enable_mode(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
prompt = self._connection.get_prompt()
if not to_text(prompt, errors='surrogate_or_strict').strip().endswith('#'):
raise AnsibleError('operation requires privilege escalation')
return func(self, *args, **kwargs)
return wrapped
class CliconfBase(AnsiblePlugin):
"""
A base class for implementing cli connections
.. note:: String inputs to :meth:`send_command` will be cast to byte strings
within this method and as such are not required to be made byte strings
beforehand. Please avoid using literal byte strings (``b'string'``) in
         :class:`CliconfBase` plugins as this can lead to unexpected errors when
running on Python 3
    List of supported RPCs:
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:get: Execute specified command on remote device
:get_capabilities: Retrieves device information and supported rpc methods
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
    Note: The list of RPCs supported by the remote device can be extracted from
        the output of get_capabilities()
:returns: Returns output received from remote device as byte string
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
conn.get('show lldp neighbors detail')
conn.get_config('running')
conn.edit_config(['hostname test', 'netconf ssh'])
"""
__rpc__ = ['get_config', 'edit_config', 'get_capabilities', 'get', 'enable_response_logging', 'disable_response_logging']
def __init__(self, connection):
super(CliconfBase, self).__init__()
self._connection = connection
self.history = list()
self.response_logging = False
def _alarm_handler(self, signum, frame):
"""Alarm handler raised in case of command timeout """
self._connection.queue_message('log', 'closing shell due to command timeout (%s seconds).' % self._connection._play_context.timeout)
self.close()
def send_command(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, prompt_retry_check=False, check_all=False):
"""Executes a command over the device connection
This method will execute a command over the device connection and
return the results to the caller. This method will also perform
logging of any commands based on the `nolog` argument.
:param command: The command to send over the connection to the device
:param prompt: A single regex pattern or a sequence of patterns to evaluate the expected prompt from the command
:param answer: The answer to respond with if the prompt is matched.
:param sendonly: Bool value that will send the command but not wait for a result.
:param newline: Bool value that will append the newline character to the command
:param prompt_retry_check: Bool value for trying to detect more prompts
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:returns: The output from the device after executing the command
"""
kwargs = {
'command': to_bytes(command),
'sendonly': sendonly,
'newline': newline,
'prompt_retry_check': prompt_retry_check,
'check_all': check_all
}
if prompt is not None:
if isinstance(prompt, list):
kwargs['prompt'] = [to_bytes(p) for p in prompt]
else:
kwargs['prompt'] = to_bytes(prompt)
if answer is not None:
if isinstance(answer, list):
kwargs['answer'] = [to_bytes(p) for p in answer]
else:
kwargs['answer'] = to_bytes(answer)
resp = self._connection.send(**kwargs)
if not self.response_logging:
self.history.append(('*****', '*****'))
else:
self.history.append((kwargs['command'], resp))
return resp
def get_base_rpc(self):
"""Returns list of base rpc method supported by remote device"""
return self.__rpc__
def get_history(self):
""" Returns the history file for all commands
This will return a log of all the commands that have been sent to
the device and all of the output received. By default, all commands
and output will be redacted unless explicitly configured otherwise.
:return: An ordered list of command, output pairs
"""
return self.history
def reset_history(self):
""" Resets the history of run commands
:return: None
"""
self.history = list()
def enable_response_logging(self):
"""Enable logging command response"""
self.response_logging = True
def disable_response_logging(self):
"""Disable logging command response"""
self.response_logging = False
@abstractmethod
def get_config(self, source='running', flags=None, format=None):
"""Retrieves the specified configuration from the device
This method will retrieve the configuration specified by source and
return it to the caller as a string. Subsequent calls to this method
will retrieve a new configuration from the device
:param source: The configuration source to return from the device.
This argument accepts either `running` or `startup` as valid values.
:param flags: For devices that support configuration filtering, this
keyword argument is used to filter the returned configuration.
The use of this keyword argument is device dependent and will be
silently ignored on devices that do not support it.
:param format: For devices that support fetching different configuration
format, this keyword argument is used to specify the format in which
configuration is to be retrieved.
:return: The device configuration as specified by the source argument.
"""
pass
@abstractmethod
def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
"""Loads the candidate configuration into the network device
This method will load the specified candidate config into the device
and merge with the current configuration unless replace is set to
        True. If the device does not support config replace, an error
        is returned.
:param candidate: The configuration to load into the device and merge
with the current running configuration
:param commit: Boolean value that indicates if the device candidate
configuration should be pushed in the running configuration or discarded.
        :param replace: If the value is True/False, it indicates whether the running configuration should be completely
                        replaced by the candidate configuration. It can also take a configuration file path as its value;
                        in that case the file should already be present on the remote host at the mentioned path as a
                        prerequisite.
        :param comment: Commit comment, provided it is supported by the remote host
        :return: Returns a json string which contains the configuration applied on the remote host, the
                 response returned from executing the configuration commands, and platform-relevant data.
{
"diff": "",
"response": [],
"request": []
}
"""
pass
@abstractmethod
def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False):
"""Execute specified command on remote device
This method will retrieve the specified data and
return it to the caller as a string.
:param command: command in string format to be executed on remote device
:param prompt: the expected prompt generated by executing command, this can
be a string or a list of strings
:param answer: the string to respond to the prompt with
:param sendonly: bool to disable waiting for response, default is false
:param newline: bool to indicate if newline should be added at end of answer or not
:param output: For devices that support fetching command output in different
format, this keyword argument is used to specify the output in which
response is to be retrieved.
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:return: The output from the device after executing the command
"""
pass
@abstractmethod
def get_capabilities(self):
"""Returns the basic capabilities of the network device
This method will provide some basic facts about the device and
what capabilities it has to modify the configuration. The minimum
return from this method takes the following format.
eg:
{
'rpc': [list of supported rpcs],
'network_api': <str>, # the name of the transport
'device_info': {
'network_os': <str>,
'network_os_version': <str>,
'network_os_model': <str>,
'network_os_hostname': <str>,
'network_os_image': <str>,
'network_os_platform': <str>,
},
'device_operations': {
                'supports_diff_replace': <bool>, # identify if merging or replacing config is supported
'supports_commit': <bool>, # identify if commit is supported by device or not
'supports_rollback': <bool>, # identify if rollback is supported or not
'supports_defaults': <bool>, # identify if fetching running config with default is supported
                'supports_commit_comment': <bool>, # identify if adding comment to commit is supported or not
'supports_onbox_diff': <bool>, # identify if on box diff capability is supported or not
'supports_generate_diff': <bool>, # identify if diff capability is supported within plugin
'supports_multiline_delimiter': <bool>, # identify if multiline delimiter is supported within config
'supports_diff_match': <bool>, # identify if match is supported
'supports_diff_ignore_lines': <bool>, # identify if ignore line in diff is supported
'supports_config_replace': <bool>, # identify if running config replace with candidate config is supported
'supports_admin': <bool>, # identify if admin configure mode is supported or not
'supports_commit_label': <bool>, # identify if commit label is supported or not
}
'format': [list of supported configuration format],
'diff_match': [list of supported match values],
'diff_replace': [list of supported replace values],
'output': [list of supported command output format]
}
:return: capability as dict
"""
result = {}
result['rpc'] = self.get_base_rpc()
result['device_info'] = self.get_device_info()
result['network_api'] = 'cliconf'
return result
@abstractmethod
def get_device_info(self):
"""Returns basic information about the network device.
This method will provide basic information about the device such as OS version and model
name. This data is expected to be used to fill the 'device_info' key in get_capabilities()
above.
:return: dictionary of device information
"""
pass
def commit(self, comment=None):
"""Commit configuration changes
This method will perform the commit operation on a previously loaded
candidate configuration that was loaded using `edit_config()`. If
there is a candidate configuration, it will be committed to the
active configuration. If there is not a candidate configuration, this
method should just silently return.
:return: None
"""
return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os)
def discard_changes(self):
"""Discard candidate configuration
This method will discard the current candidate configuration if one
is present. If there is no candidate configuration currently loaded,
then this method should just silently return
:returns: None
"""
return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os)
def rollback(self, rollback_id, commit=True):
"""
        :param rollback_id: The commit id to which configuration should be rolled back
:param commit: Flag to indicate if changes should be committed or not
:return: Returns diff between before and after change.
"""
pass
def copy_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Copies file over scp/sftp to remote device
:param source: Source file path
:param destination: Destination file path on remote device
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
                scp.put(source, destination)
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.put(source, destination)
def get_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Fetch file over scp/sftp from remote device
:param source: Source file path
:param destination: Destination file path
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
try:
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
scp.get(source, destination)
except EOFError:
# This appears to be benign.
pass
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.get(source, destination)
def get_diff(self, candidate=None, running=None, diff_match=None, diff_ignore_lines=None, path=None, diff_replace=None):
"""
        Generate diff between candidate and running configuration. If the
        remote host supports onbox diff capability (i.e. supports_onbox_diff), the
        candidate and running configurations are not required to be passed as arguments.
        If onbox diff capability is not supported, the candidate argument is mandatory
        and the running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param diff_match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with the running configuration
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param diff_replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration and/or banner diff in json format.
{
'config_diff': ''
}
"""
pass
def run_commands(self, commands=None, check_rc=True):
"""
Execute a list of commands on remote host and return the list of response
:param commands: The list of command that needs to be executed on remote host.
The individual command in list can either be a command string or command dict.
If the command is dict the valid keys are
{
'command': <command to be executed>
'prompt': <expected prompt on executing the command>,
'answer': <answer for the prompt>,
'output': <the format in which command output should be rendered eg: 'json', 'text'>,
            'sendonly': <Boolean flag to indicate if the command execution response should be ignored or not>
}
        :param check_rc: Boolean flag to indicate whether the returned response should be checked for errors.
                         If check_rc is False the error output is appended to the returned response list; if the
                         value is True an exception is raised.
:return: List of returned response
"""
pass
def check_edit_config_capability(self, operations, candidate=None, commit=True, replace=None, comment=None):
if not candidate and not replace:
raise ValueError("must provide a candidate or replace to load configuration")
if commit not in (True, False):
raise ValueError("'commit' must be a bool, got %s" % commit)
        if replace and not operations.get('supports_replace', False):
            raise ValueError("configuration replace is not supported")
        if comment and not operations.get('supports_commit_comment', False):
            raise ValueError("commit comment is not supported")
def set_cli_prompt_context(self):
"""
Ensure the command prompt on device is in right mode
:return: None
"""
pass
def _update_cli_prompt_context(self, config_context=None, exit_command='exit'):
"""
Update the cli prompt context to ensure it is in operational mode
        :param config_context: A string value used to identify whether the current cli prompt ends with the config mode prompt
:param exit_command: Command to execute to exit the config mode
:return: None
"""
out = self._connection.get_prompt()
if out is None:
raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received'
u' response window: %s' % self._connection._last_recv_window)
while True:
out = to_text(out, errors='surrogate_then_replace').strip()
if config_context and out.endswith(config_context):
self._connection.queue_message('vvvv', 'wrong context, sending exit to device')
self.send_command(exit_command)
out = self._connection.get_prompt()
else:
break
| size: 22,719 | language: Python | extension: .py | total_lines: 404 | avg_line_length: 44.762376 | max_line_length: 140 | alphanum_fraction: 0.641476 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
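The `enable_mode` decorator above gates privileged RPCs on the CLI prompt. A self-contained sketch of the same guard follows; `DummyConnection`/`DummyCliconf` are invented stand-ins, and a plain `PermissionError` replaces `AnsibleError` to keep the example dependency-free:

```python
# Self-contained sketch of the enable_mode guard: refuse to run the wrapped
# method unless the device prompt ends in '#' (privileged mode on most
# network CLIs). DummyCliconf is an invented stand-in for a cliconf plugin.
from functools import wraps

def enable_mode(func):
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        prompt = self._connection.get_prompt()
        if not prompt.strip().endswith('#'):
            raise PermissionError('operation requires privilege escalation')
        return func(self, *args, **kwargs)
    return wrapped

class DummyConnection:
    def __init__(self, prompt):
        self._prompt = prompt
    def get_prompt(self):
        return self._prompt

class DummyCliconf:
    def __init__(self, prompt):
        self._connection = DummyConnection(prompt)
    @enable_mode
    def edit_config(self, candidate):
        return 'applied %r' % candidate

print(DummyCliconf('switch#').edit_config(['hostname test']))  # allowed
try:
    DummyCliconf('switch>').edit_config(['hostname test'])     # user mode -> refused
except PermissionError as e:
    print(e)
```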
| id: 14,404 | file_name: paramiko_ssh.py | file_path: ansible_ansible/lib/ansible/plugins/connection/paramiko_ssh.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
author: Ansible Core Team
name: paramiko
short_description: Run tasks via Python SSH (paramiko)
description:
- Use the Python SSH implementation (Paramiko) to connect to targets
- The paramiko transport is provided because many distributions, in particular EL6 and before do not support ControlPersist
in their SSH implementations.
- This is needed on the Ansible control machine to be reasonably efficient with connections.
Thus paramiko is faster for most users on these platforms.
Users with ControlPersist capability can consider using -c ssh or configuring the transport in the configuration file.
- This plugin also borrows a lot of settings from the ssh plugin as they both cover the same protocol.
version_added: "0.1"
options:
remote_addr:
description:
- Address of the remote target
default: inventory_hostname
type: string
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_ssh_host
- name: ansible_paramiko_host
port:
description: Remote port to connect to.
type: int
default: 22
ini:
- section: defaults
key: remote_port
- section: paramiko_connection
key: remote_port
version_added: '2.15'
env:
- name: ANSIBLE_REMOTE_PORT
- name: ANSIBLE_REMOTE_PARAMIKO_PORT
version_added: '2.15'
vars:
- name: ansible_port
- name: ansible_ssh_port
- name: ansible_paramiko_port
version_added: '2.15'
keyword:
- name: port
remote_user:
description:
- User to login/authenticate as
- Can be set from the CLI via the C(--user) or C(-u) options.
type: string
vars:
- name: ansible_user
- name: ansible_ssh_user
- name: ansible_paramiko_user
env:
- name: ANSIBLE_REMOTE_USER
- name: ANSIBLE_PARAMIKO_REMOTE_USER
version_added: '2.5'
ini:
- section: defaults
key: remote_user
- section: paramiko_connection
key: remote_user
version_added: '2.5'
keyword:
- name: remote_user
password:
description:
- Secret used to either login the ssh server or as a passphrase for ssh keys that require it
- Can be set from the CLI via the C(--ask-pass) option.
type: string
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
- name: ansible_paramiko_pass
- name: ansible_paramiko_password
version_added: '2.5'
use_rsa_sha2_algorithms:
description:
- Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys
- On paramiko versions older than 2.9, this only affects hostkeys
- For behavior matching paramiko<2.9 set this to V(False)
vars:
- name: ansible_paramiko_use_rsa_sha2_algorithms
ini:
- {key: use_rsa_sha2_algorithms, section: paramiko_connection}
env:
- {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS}
default: True
type: boolean
version_added: '2.14'
host_key_auto_add:
description: 'Automatically add host keys'
env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
ini:
- {key: host_key_auto_add, section: paramiko_connection}
type: boolean
look_for_keys:
default: True
description: 'False to disable searching for private key files in ~/.ssh/'
env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
ini:
- {key: look_for_keys, section: paramiko_connection}
type: boolean
proxy_command:
default: ''
description:
- Proxy information for running the connection via a jumphost.
type: string
env: [{name: ANSIBLE_PARAMIKO_PROXY_COMMAND}]
ini:
- {key: proxy_command, section: paramiko_connection}
vars:
- name: ansible_paramiko_proxy_command
version_added: '2.15'
pty:
default: True
description: 'SUDO usually requires a PTY, True to give a PTY and False to not give a PTY.'
env:
- name: ANSIBLE_PARAMIKO_PTY
ini:
- section: paramiko_connection
key: pty
type: boolean
record_host_keys:
default: True
description: 'Save the host keys to a file'
env: [{name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS}]
ini:
- section: paramiko_connection
key: record_host_keys
type: boolean
host_key_checking:
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
type: boolean
default: True
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
- name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
version_added: '2.5'
ini:
- section: defaults
key: host_key_checking
- section: paramiko_connection
key: host_key_checking
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
- name: ansible_paramiko_host_key_checking
version_added: '2.5'
use_persistent_connections:
description: 'Toggles the use of persistence for connections'
type: boolean
default: False
env:
- name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
ini:
- section: defaults
key: use_persistent_connections
banner_timeout:
type: float
default: 30
version_added: '2.14'
description:
- Configures, in seconds, the amount of time to wait for the SSH
banner to be presented. This option is supported by paramiko
version 1.15.0 or newer.
ini:
- section: paramiko_connection
key: banner_timeout
env:
- name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT
timeout:
type: int
default: 10
description: Number of seconds until the plugin gives up on failing to establish a TCP connection.
ini:
- section: defaults
key: timeout
- section: ssh_connection
key: timeout
version_added: '2.11'
- section: paramiko_connection
key: timeout
version_added: '2.15'
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_SSH_TIMEOUT
version_added: '2.11'
- name: ANSIBLE_PARAMIKO_TIMEOUT
version_added: '2.15'
vars:
- name: ansible_ssh_timeout
version_added: '2.11'
- name: ansible_paramiko_timeout
version_added: '2.15'
cli:
- name: timeout
private_key_file:
description:
- Path to private key file to use for authentication.
type: string
ini:
- section: defaults
key: private_key_file
- section: paramiko_connection
key: private_key_file
version_added: '2.15'
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
- name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE
version_added: '2.15'
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
- name: ansible_paramiko_private_key_file
version_added: '2.15'
cli:
- name: private_key_file
option: '--private-key'
"""
import os
import socket
import tempfile
import traceback
import fcntl
import re
import typing as t
from ansible.module_utils.compat.version import LooseVersion
from binascii import hexlify
from ansible.errors import (
AnsibleAuthenticationFailure,
AnsibleConnectionFailure,
AnsibleError,
AnsibleFileNotFound,
)
from ansible.module_utils.compat.paramiko import PARAMIKO_IMPORT_ERR, paramiko
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible.utils.path import makedirs_safe
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
display = Display()
AUTHENTICITY_MSG = """
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""
# SSH Options Regex
SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
MissingHostKeyPolicy: type = object
if paramiko:
MissingHostKeyPolicy = paramiko.MissingHostKeyPolicy
class MyAddPolicy(MissingHostKeyPolicy):
"""
Based on AutoAddPolicy in paramiko so we can determine when keys are added
and also prompt for input.
Policy for automatically adding the hostname and new host key to the
local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
"""
def __init__(self, connection: Connection) -> None:
self.connection = connection
self._options = connection._options
def missing_host_key(self, client, hostname, key) -> None:
if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))):
fingerprint = hexlify(key.get_fingerprint())
ktype = key.get_name()
if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
# don't print the prompt string since the user cannot respond
# to the question anyway
raise AnsibleError(AUTHENTICITY_MSG[1:92] % (hostname, ktype, fingerprint))
inp = to_text(
display.prompt_until(AUTHENTICITY_MSG % (hostname, ktype, fingerprint), private=False),
errors='surrogate_or_strict'
)
if inp not in ['yes', 'y', '']:
raise AnsibleError("host connection rejected by user")
key._added_by_ansible_this_time = True
# existing implementation below:
client._host_keys.add(hostname, key.get_name(), key)
# host keys are actually saved in close() function below
# in order to control ordering.
# keep connection objects on a per host basis to avoid repeated attempts to reconnect
SSH_CONNECTION_CACHE: dict[str, paramiko.client.SSHClient] = {}
SFTP_CONNECTION_CACHE: dict[str, paramiko.sftp_client.SFTPClient] = {}
class Connection(ConnectionBase):
""" SSH based connections with Paramiko """
transport = 'paramiko'
_log_channel: str | None = None
def _cache_key(self) -> str:
return "%s__%s__" % (self.get_option('remote_addr'), self.get_option('remote_user'))
def _connect(self) -> Connection:
cache_key = self._cache_key()
if cache_key in SSH_CONNECTION_CACHE:
self.ssh = SSH_CONNECTION_CACHE[cache_key]
else:
self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
self._connected = True
return self
def _set_log_channel(self, name: str) -> None:
"""Mimic paramiko.SSHClient.set_log_channel"""
self._log_channel = name
def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]:
proxy_command = self.get_option('proxy_command') or None
sock_kwarg = {}
if proxy_command:
replacers = {
'%h': self.get_option('remote_addr'),
'%p': port,
'%r': self.get_option('remote_user')
}
for find, replace in replacers.items():
proxy_command = proxy_command.replace(find, str(replace))
try:
sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self.get_option('remote_addr'))
except AttributeError:
display.warning('Paramiko ProxyCommand support unavailable. '
'Please upgrade to Paramiko 1.9.0 or newer. '
'Not using configured ProxyCommand')
return sock_kwarg
def _connect_uncached(self) -> paramiko.SSHClient:
""" activates the connection object """
if paramiko is None:
raise AnsibleError("paramiko is not installed: %s" % to_native(PARAMIKO_IMPORT_ERR))
port = self.get_option('port')
display.vvv("ESTABLISH PARAMIKO SSH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.get_option('remote_user'), port, self.get_option('remote_addr')),
host=self.get_option('remote_addr'))
ssh = paramiko.SSHClient()
# Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently
# is keeping or omitting rsa-sha2 algorithms
# default_keys: t.Tuple[str] = ()
paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ())
paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ())
use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms')
disabled_algorithms: t.Dict[str, t.Iterable[str]] = {}
if not use_rsa_sha2_algorithms:
if paramiko_preferred_pubkeys:
disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a)
if paramiko_preferred_hostkeys:
disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a)
# override paramiko's default logger name
if self._log_channel is not None:
ssh.set_log_channel(self._log_channel)
self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
if self.get_option('host_key_checking'):
for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
try:
# TODO: check if we need to look at several possible locations, possible for loop
ssh.load_system_host_keys(ssh_known_hosts)
break
except IOError:
pass # file was not found, but not required to function
ssh.load_system_host_keys()
ssh_connect_kwargs = self._parse_proxy_command(port)
ssh.set_missing_host_key_policy(MyAddPolicy(self))
conn_password = self.get_option('password')
allow_agent = True
if conn_password is not None:
allow_agent = False
try:
key_filename = None
if self.get_option('private_key_file'):
key_filename = os.path.expanduser(self.get_option('private_key_file'))
# paramiko 2.2 introduced auth_timeout parameter
if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'):
ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout')
# paramiko 1.15 introduced banner timeout parameter
if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'):
ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout')
ssh.connect(
self.get_option('remote_addr').lower(),
username=self.get_option('remote_user'),
allow_agent=allow_agent,
look_for_keys=self.get_option('look_for_keys'),
key_filename=key_filename,
password=conn_password,
timeout=self.get_option('timeout'),
port=port,
disabled_algorithms=disabled_algorithms,
**ssh_connect_kwargs,
)
except paramiko.ssh_exception.BadHostKeyException as e:
raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname)
except paramiko.ssh_exception.AuthenticationException as e:
msg = 'Failed to authenticate: {0}'.format(to_text(e))
raise AnsibleAuthenticationFailure(msg)
except Exception as e:
msg = to_text(e)
if u"PID check failed" in msg:
raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
elif u"Private key file is encrypted" in msg:
msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self.get_option('remote_user'), self.get_option('remote_addr'), port, msg)
raise AnsibleConnectionFailure(msg)
else:
raise AnsibleConnectionFailure(msg)
return ssh
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
""" run a command on the remote host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
bufsize = 4096
try:
self.ssh.get_transport().set_keepalive(5)
chan = self.ssh.get_transport().open_session()
except Exception as e:
text_e = to_text(e)
msg = u"Failed to open session"
if text_e:
msg += u": %s" % text_e
raise AnsibleConnectionFailure(to_native(msg))
# sudo usually requires a PTY (cf. requiretty option), therefore
# we give it one by default (pty=True in ansible.cfg), and we try
# to initialise from the calling environment when sudoable is enabled
if self.get_option('pty') and sudoable:
chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
display.vvv("EXEC %s" % cmd, host=self.get_option('remote_addr'))
cmd = to_bytes(cmd, errors='surrogate_or_strict')
no_prompt_out = b''
no_prompt_err = b''
become_output = b''
try:
chan.exec_command(cmd)
if self.become and self.become.expect_prompt():
passprompt = False
                become_success = False
                while not (become_success or passprompt):
display.debug('Waiting for Privilege Escalation input')
chunk = chan.recv(bufsize)
display.debug("chunk is: %r" % chunk)
if not chunk:
if b'unknown user' in become_output:
n_become_user = to_native(self.become.get_option('become_user'))
raise AnsibleError('user %s does not exist' % n_become_user)
else:
break
# raise AnsibleError('ssh connection closed waiting for password prompt')
become_output += chunk
# need to check every line because we might get lectured
# and we might get the middle of a line in a chunk
for line in become_output.splitlines(True):
if self.become.check_success(line):
                            become_success = True
break
elif self.become.check_password_prompt(line):
passprompt = True
break
if passprompt:
if self.become:
become_pass = self.become.get_option('become_pass')
chan.sendall(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
else:
raise AnsibleError("A password is required but none was supplied")
else:
no_prompt_out += become_output
no_prompt_err += become_output
except socket.timeout:
raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + to_text(become_output))
stdout = b''.join(chan.makefile('rb', bufsize))
stderr = b''.join(chan.makefile_stderr('rb', bufsize))
        return (chan.recv_exit_status(), no_prompt_out + stdout, no_prompt_err + stderr)
def put_file(self, in_path: str, out_path: str) -> None:
""" transfer a file from local to remote """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
try:
self.sftp = self.ssh.open_sftp()
except Exception as e:
raise AnsibleError("failed to open a SFTP connection (%s)" % e)
try:
self.sftp.put(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
except IOError:
raise AnsibleError("failed to transfer file to %s" % out_path)
def _connect_sftp(self) -> paramiko.sftp_client.SFTPClient:
cache_key = "%s__%s__" % (self.get_option('remote_addr'), self.get_option('remote_user'))
if cache_key in SFTP_CONNECTION_CACHE:
return SFTP_CONNECTION_CACHE[cache_key]
else:
result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
return result
def fetch_file(self, in_path: str, out_path: str) -> None:
""" save a remote file to the specified path """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
try:
self.sftp = self._connect_sftp()
except Exception as e:
raise AnsibleError("failed to open a SFTP connection (%s)" % to_native(e))
try:
self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
except IOError:
raise AnsibleError("failed to transfer file from %s" % in_path)
def _any_keys_added(self) -> bool:
for hostname, keys in self.ssh._host_keys.items():
for keytype, key in keys.items():
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
return True
return False
def _save_ssh_host_keys(self, filename: str) -> None:
"""
not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
don't complain about it :)
"""
if not self._any_keys_added():
return
path = os.path.expanduser("~/.ssh")
makedirs_safe(path)
with open(filename, 'w') as f:
for hostname, keys in self.ssh._host_keys.items():
for keytype, key in keys.items():
# was f.write
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if not added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
for hostname, keys in self.ssh._host_keys.items():
for keytype, key in keys.items():
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
def reset(self) -> None:
if not self._connected:
return
self.close()
self._connect()
def close(self) -> None:
""" terminate the connection """
cache_key = self._cache_key()
SSH_CONNECTION_CACHE.pop(cache_key, None)
SFTP_CONNECTION_CACHE.pop(cache_key, None)
if hasattr(self, 'sftp'):
if self.sftp is not None:
self.sftp.close()
if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
# add any new SSH host keys -- warning -- this could be slow
# (This doesn't acquire the connection lock because it needs
# to exclude only other known_hosts writers, not connections
# that are starting up.)
lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
dirname = os.path.dirname(self.keyfile)
makedirs_safe(dirname)
KEY_LOCK = open(lockfile, 'w')
fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
try:
# just in case any were added recently
self.ssh.load_system_host_keys()
self.ssh._host_keys.update(self.ssh._system_host_keys)
# gather information about the current key file, so
# we can ensure the new file has the correct mode/owner
key_dir = os.path.dirname(self.keyfile)
if os.path.exists(self.keyfile):
key_stat = os.stat(self.keyfile)
mode = key_stat.st_mode
uid = key_stat.st_uid
gid = key_stat.st_gid
else:
mode = 33188
uid = os.getuid()
gid = os.getgid()
# Save the new keys to a temporary file and move it into place
# rather than rewriting the file. We set delete=False because
# the file will be moved into place rather than cleaned up.
tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
os.chmod(tmp_keyfile.name, mode & 0o7777)
os.chown(tmp_keyfile.name, uid, gid)
self._save_ssh_host_keys(tmp_keyfile.name)
tmp_keyfile.close()
os.rename(tmp_keyfile.name, self.keyfile)
except Exception:
# unable to save keys, including scenario when key was invalid
# and caught earlier
traceback.print_exc()
fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
self.ssh.close()
self._connected = False
| size: 27,195 | language: Python | extension: .py | total_lines: 588 | avg_line_length: 34.435374 | max_line_length: 159 | alphanum_fraction: 0.589787 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
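One easily testable piece of the connection plugin above is the ProxyCommand token expansion (`%h`, `%p`, `%r`) performed in `_parse_proxy_command`. A standalone sketch of that replacement step, with no paramiko dependency:

```python
# Standalone sketch of the %h/%p/%r token expansion done by
# _parse_proxy_command before the string is handed to paramiko.ProxyCommand.
def expand_proxy_command(proxy_command, host, port, user):
    replacers = {'%h': host, '%p': port, '%r': user}
    for find, replace in replacers.items():
        proxy_command = proxy_command.replace(find, str(replace))
    return proxy_command

cmd = expand_proxy_command('ssh -W %h:%p jumpuser@bastion -l %r',
                           host='10.0.0.5', port=22, user='deploy')
print(cmd)  # ssh -W 10.0.0.5:22 jumpuser@bastion -l deploy
```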
| id: 14,405 | file_name: local.py | file_path: ansible_ansible/lib/ansible/plugins/connection/local.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: local
short_description: execute on controller
description:
- This connection plugin allows ansible to execute tasks on the Ansible 'controller' instead of on a remote host.
author: ansible (@core)
version_added: historical
extends_documentation_fragment:
- connection_pipelining
notes:
- The remote user is ignored, the user with which the ansible CLI was executed is used instead.
"""
import fcntl
import getpass
import os
import pty
import selectors
import shutil
import subprocess
import typing as t
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
display = Display()
class Connection(ConnectionBase):
""" Local based connections """
transport = 'local'
has_pipelining = True
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
super(Connection, self).__init__(*args, **kwargs)
self.cwd = None
try:
self.default_user = getpass.getuser()
except KeyError:
display.vv("Current user (uid=%s) does not seem to exist on this system, leaving user empty." % os.getuid())
self.default_user = ""
def _connect(self) -> Connection:
""" connect to the local host; nothing to do here """
# Because we haven't made any remote connection we're running as
# the local user, rather than as whatever is configured in remote_user.
self._play_context.remote_user = self.default_user
if not self._connected:
display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
self._connected = True
return self
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
""" run a command on the local host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.debug("in local.exec_command()")
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
raise AnsibleError("failed to find the executable specified %s."
" Please verify if the executable exists and re-try." % executable)
display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
display.debug("opening command with Popen()")
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = map(to_bytes, cmd)
master = None
stdin = subprocess.PIPE
if sudoable and self.become and self.become.expect_prompt() and not self.get_option('pipelining'):
# Create a pty if sudoable for privilege escalation that needs it.
# Falls back to using a standard pipe if this fails, which may
# cause the command to fail in certain situations where we are escalating
# privileges or the command otherwise needs a pty.
try:
master, stdin = pty.openpty()
except (IOError, OSError) as e:
display.debug("Unable to open pty: %s" % to_native(e))
p = subprocess.Popen(
cmd,
shell=isinstance(cmd, (text_type, binary_type)),
executable=executable,
cwd=self.cwd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# if we created a master, we can close the other half of the pty now, otherwise master is stdin
if master is not None:
os.close(stdin)
display.debug("done running command with Popen()")
if self.become and self.become.expect_prompt() and sudoable:
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
become_output = b''
try:
while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
events = selector.select(self._play_context.timeout)
if not events:
stdout, stderr = p.communicate()
raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
for key, event in events:
if key.fileobj == p.stdout:
chunk = p.stdout.read()
elif key.fileobj == p.stderr:
chunk = p.stderr.read()
if not chunk:
stdout, stderr = p.communicate()
raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
become_output += chunk
finally:
selector.close()
if not self.become.check_success(become_output):
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
if master is None:
p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
else:
os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
display.debug("getting output with communicate()")
stdout, stderr = p.communicate(in_data)
display.debug("done communicating")
# finally, close the other half of the pty, if it was created
if master:
os.close(master)
display.debug("done with local.exec_command()")
return (p.returncode, stdout, stderr)
def put_file(self, in_path: str, out_path: str) -> None:
""" transfer a file from local to local """
super(Connection, self).put_file(in_path, out_path)
in_path = unfrackpath(in_path, basedir=self.cwd)
out_path = unfrackpath(out_path, basedir=self.cwd)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
try:
shutil.copyfile(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
except shutil.Error:
raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_native(in_path), to_native(out_path)))
except IOError as e:
raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
def fetch_file(self, in_path: str, out_path: str) -> None:
""" fetch a file from local to local -- for compatibility """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
self.put_file(in_path, out_path)
def close(self) -> None:
""" terminate the connection; nothing to do here """
self._connected = False
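# --- Illustrative usage sketch (appended for exposition; not part of the
# upstream module). A minimal, hedged example of driving this plugin by
# hand, as tests sometimes do; PlayContext and connection_loader are real
# APIs, but the command string is arbitrary and this assumes ansible-core
# is importable on the controller.
if __name__ == '__main__':
    from ansible.playbook.play_context import PlayContext
    from ansible.plugins.loader import connection_loader

    conn = connection_loader.get('local', PlayContext(), None)
    conn.set_options(direct={})  # populate option defaults
    rc, out, err = conn.exec_command('echo hello from the controller')
    print(rc, out.decode(), err.decode())
    conn.close()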
| 8,339 | Python | .py | 153 | 43.908497 | 144 | 0.634254 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,406 | ssh.py | ansible_ansible/lib/ansible/plugins/connection/ssh.py |
# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
# Copyright 2017 Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: ssh
short_description: connect via SSH client binary
description:
- This connection plugin allows Ansible to communicate to the target machines through normal SSH command line.
- Ansible does not expose a channel to allow communication between the user and the SSH process to accept
a password manually to decrypt an SSH key when using this connection plugin (which is the default). The
use of C(ssh-agent) is highly recommended.
author: ansible (@core)
extends_documentation_fragment:
- connection_pipelining
version_added: historical
notes:
- This plugin is mostly a wrapper to the ``ssh`` CLI utility and the exact behavior of the options depends on this tool.
This means that the documentation provided here is subject to being overridden by the CLI tool itself.
- Many options default to V(None) here but that only means we do not override the SSH tool's defaults and/or configuration.
For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config).
- The ssh CLI tool uses return code 255 as a 'connection error', this can conflict with commands/tools that
also return 255 as an error code and will look like an 'unreachable' condition or 'connection error' to this plugin.
options:
host:
description: Hostname/IP to connect to.
default: inventory_hostname
type: string
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_ssh_host
- name: delegated_vars['ansible_host']
- name: delegated_vars['ansible_ssh_host']
host_key_checking:
description: Determines whether SSH should accept or reject a connection after checking host keys.
default: True
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the O(remote_user). Can be supplied as CLI option.
type: string
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
sshpass_prompt:
description:
- Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
- Defaults to C(Enter PIN for) when pkcs11_provider is set.
default: ''
type: string
ini:
- section: 'ssh_connection'
key: 'sshpass_prompt'
env:
- name: ANSIBLE_SSHPASS_PROMPT
vars:
- name: ansible_sshpass_prompt
version_added: '2.10'
ssh_args:
description: Arguments to pass to all SSH CLI tools.
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
type: string
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
vars:
- name: ansible_ssh_args
version_added: '2.7'
ssh_common_args:
description: Common extra args for all SSH CLI tools.
type: string
ini:
- section: 'ssh_connection'
key: 'ssh_common_args'
version_added: '2.7'
env:
- name: ANSIBLE_SSH_COMMON_ARGS
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
cli:
- name: ssh_common_args
default: ''
ssh_executable:
default: ssh
description:
- This defines the location of the SSH binary. It defaults to V(ssh) which will use the first SSH binary available in $PATH.
- This option is usually not required, it might be useful when access to system SSH is restricted,
or when using SSH wrappers to connect to remote hosts.
type: string
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
vars:
- name: ansible_ssh_executable
version_added: '2.7'
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to V(sftp) which will use the first binary available in $PATH.
type: string
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_sftp_executable
version_added: '2.7'
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to V(scp) which will use the first binary available in $PATH.
type: string
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_scp_executable
version_added: '2.7'
scp_extra_args:
description: Extra arguments exclusive to the C(scp) CLI.
type: string
vars:
- name: ansible_scp_extra_args
env:
- name: ANSIBLE_SCP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: scp_extra_args
default: ''
sftp_extra_args:
description: Extra arguments exclusive to the C(sftp) CLI.
type: string
vars:
- name: ansible_sftp_extra_args
env:
- name: ANSIBLE_SFTP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: sftp_extra_args
default: ''
ssh_extra_args:
description: Extra arguments exclusive to the SSH CLI.
type: string
vars:
- name: ansible_ssh_extra_args
env:
- name: ANSIBLE_SSH_EXTRA_ARGS
version_added: '2.7'
ini:
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: ssh_extra_args
default: ''
reconnection_retries:
description:
- Number of attempts to connect.
- Ansible retries connections only if it gets an SSH error with a return code of 255.
- Any errors with return codes other than 255 indicate an issue with program execution.
default: 0
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
vars:
- name: ansible_ssh_retries
version_added: '2.7'
port:
description: Remote port to connect to.
type: int
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
keyword:
- name: port
remote_user:
description:
- User name with which to log in to the remote server, normally set by the remote_user keyword.
- If no user is supplied, Ansible will let the SSH client binary choose the user as it normally would.
type: string
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
cli:
- name: user
keyword:
- name: remote_user
pipelining:
env:
- name: ANSIBLE_PIPELINING
- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
- section: ssh_connection
key: pipelining
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication.
type: string
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
cli:
- name: private_key_file
option: '--private-key'
control_path:
description:
- This is the location to save SSH's ControlPath sockets, it uses SSH's variable substitution.
- Since 2.3, if null (default), ansible will generate a unique hash. Use ``%(directory)s`` to indicate where to use the control dir path setting.
- Before 2.3 it defaulted to ``control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r``.
- Be aware that this setting is ignored if C(-o ControlPath) is set in ssh args.
type: string
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
vars:
- name: ansible_control_path
version_added: '2.7'
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- Also, provides the ``%(directory)s`` variable for the control path setting.
type: string
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
vars:
- name: ansible_control_path_dir
version_added: '2.7'
sftp_batch_mode:
default: true
description: 'TODO: write it'
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
ssh_transfer_method:
description: Preferred method to use when transferring files over ssh
choices:
sftp: This is the most reliable way to copy things with SSH.
scp: Deprecated in OpenSSH. For OpenSSH >=9.0 you must add the option C(scp_extra_args="-O") to enable scp.
piped: Creates an SSH pipe with C(dd) on either side to copy the data.
smart: Tries each method in order (sftp > scp > piped), until one succeeds or they all fail.
default: smart
type: string
env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
ini:
- {key: transfer_method, section: ssh_connection}
vars:
- name: ansible_ssh_transfer_method
version_added: '2.12'
use_tty:
version_added: '2.5'
default: true
description: add -tt to ssh commands to force tty allocation.
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
- {key: usetty, section: ssh_connection}
type: bool
vars:
- name: ansible_ssh_use_tty
version_added: '2.7'
timeout:
default: 10
description:
- This is the default amount of time we will wait while establishing an SSH connection.
- It also controls how long we wait when reading from the connection once it is established (select on the socket).
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_SSH_TIMEOUT
version_added: '2.11'
ini:
- key: timeout
section: defaults
- key: timeout
section: ssh_connection
version_added: '2.11'
vars:
- name: ansible_ssh_timeout
version_added: '2.11'
cli:
- name: timeout
type: integer
pkcs11_provider:
version_added: '2.12'
default: ""
type: string
description:
- "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so"
- Requires sshpass version 1.06+, sshpass must support the -P option.
env: [{name: ANSIBLE_PKCS11_PROVIDER}]
ini:
- {key: pkcs11_provider, section: ssh_connection}
vars:
- name: ansible_ssh_pkcs11_provider
"""
import collections.abc as c
import errno
import fcntl
import hashlib
import io
import os
import pty
import re
import selectors
import shlex
import subprocess
import time
import typing as t
from functools import wraps
from ansible.errors import (
AnsibleAuthenticationFailure,
AnsibleConnectionFailure,
AnsibleError,
AnsibleFileNotFound,
)
from ansible.module_utils.six import PY3, text_type, binary_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.plugins.shell.powershell import _parse_clixml
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath, makedirs_safe
display = Display()
P = t.ParamSpec('P')
# error messages that indicate 255 return code is not from ssh itself.
b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception
# while invoking a script via -m
b'PHP Parse error:', # Php always returns with error
b'chmod: invalid mode', # chmod, but really only on AIX
b'chmod: A flag or octal number is not correct.', # chmod, other AIX
)
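# Worked example (exposition): a return code of 255 whose stderr contains
# b'PHP Parse error:' matches a signature above, so _handle_error() below
# treats it as a remote command failure rather than an ssh connection
# error, and no AnsibleConnectionFailure is raised for it.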
SSHPASS_AVAILABLE = None
SSH_DEBUG = re.compile(r'^debug\d+: .*')
class AnsibleControlPersistBrokenPipeError(AnsibleError):
""" ControlPersist broken pipe """
pass
def _handle_error(
remaining_retries: int,
command: bytes,
return_tuple: tuple[int, bytes, bytes],
no_log: bool,
host: str,
display: Display = display,
) -> None:
# sshpass errors
if command == b'sshpass':
# Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account.
if return_tuple[0] == 5:
msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries)
if remaining_retries <= 0:
msg = 'Invalid/incorrect password:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleAuthenticationFailure(msg)
# sshpass return codes are 1-6. We handled 5 above, so this catches other scenarios.
# No exception is raised, so the connection is retried - except when attempting to use
# sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly.
elif return_tuple[0] in [1, 2, 3, 4, 6]:
msg = 'sshpass error:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
details = to_native(return_tuple[2]).rstrip()
if "sshpass: invalid option -- 'P'" in details:
details = 'Installed sshpass version does not support customized password prompts. ' \
'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
raise AnsibleError('{0} {1}'.format(msg, details))
msg = '{0} {1}'.format(msg, details)
if return_tuple[0] == 255:
SSH_ERROR = True
for signature in b_NOT_SSH_ERRORS:
# 1 == stdout, 2 == stderr
if signature in return_tuple[1] or signature in return_tuple[2]:
SSH_ERROR = False
break
if SSH_ERROR:
msg = "Failed to connect to the host via ssh:"
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleConnectionFailure(msg)
# For other errors, no exception is raised so the connection is retried and we only log the messages
if 1 <= return_tuple[0] <= 254:
msg = u"Failed to connect to the host via ssh:"
if no_log:
msg = u'{0} <error censored due to no log>'.format(msg)
else:
msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip())
display.vvv(msg, host=host)
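# Summary of the classification above (exposition):
#   sshpass rc 5                -> AnsibleAuthenticationFailure, never retried
#   sshpass rc 1-4, 6           -> retried; AnsibleError only when sshpass
#                                  lacks -P support but sshpass_prompt is set
#   rc 255, genuine ssh error   -> AnsibleConnectionFailure, retryable
#   rc 255 with a known non-ssh
#   signature, or rc 1-254      -> logged at -vvv only; no exception raised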
def _ssh_retry(
func: c.Callable[t.Concatenate[Connection, P], tuple[int, bytes, bytes]],
) -> c.Callable[t.Concatenate[Connection, P], tuple[int, bytes, bytes]]:
"""
Decorator to retry ssh/scp/sftp in the case of a connection failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* sshpass returns 5 (invalid password, to prevent account lockouts)
* remaining_tries is < 2
* retries limit reached
"""
@wraps(func)
def wrapped(self: Connection, *args: P.args, **kwargs: P.kwargs) -> tuple[int, bytes, bytes]:
remaining_tries = int(self.get_option('reconnection_retries')) + 1
cmd_summary = u"%s..." % to_text(args[0])
conn_password = self.get_option('password') or self._play_context.password
for attempt in range(remaining_tries):
cmd = t.cast(list[bytes], args[0])
if attempt != 0 and conn_password and isinstance(cmd, list):
# If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
try:
try:
return_tuple = func(self, *args, **kwargs)
# TODO: this should come from task
if self._play_context.no_log:
display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
else:
display.vvv(str(return_tuple), host=self.host)
# 0 = success
# 1-254 = remote command return code
# 255 could be a failure from the ssh command itself
except (AnsibleControlPersistBrokenPipeError):
# Retry one more time because of the ControlPersist broken pipe (see #16731)
cmd = t.cast(list[bytes], args[0])
if conn_password and isinstance(cmd, list):
# This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
return_tuple = func(self, *args, **kwargs)
remaining_retries = remaining_tries - attempt - 1
_handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host)
break
# 5 = Invalid/incorrect password from sshpass
except AnsibleAuthenticationFailure:
# Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries
raise
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
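# Backoff schedule implied above (exposition): pause = 2**attempt - 1,
# capped at 30 -> 0s, 1s, 3s, 7s, 15s, 30s, 30s, ...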
if isinstance(e, AnsibleConnectionFailure):
msg = u"ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause)
else:
msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), "
u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause))
display.vv(msg, host=self.host)
time.sleep(pause)
continue
return return_tuple
return wrapped
class Connection(ConnectionBase):
""" ssh based connections """
transport = 'ssh'
has_pipelining = True
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
super(Connection, self).__init__(*args, **kwargs)
# TODO: all should come from get_option(), but some might not be set at this point yet
self.host = self._play_context.remote_addr
self.port = self._play_context.port
self.user = self._play_context.remote_user
self.control_path: str | None = None
self.control_path_dir: str | None = None
# Windows operates differently from a POSIX connection/shell plugin,
# we need to set various properties to ensure SSH on Windows continues
# to work
if getattr(self._shell, "_IS_WINDOWS", False):
self.has_native_async = True
self.always_pipeline_modules = True
self.module_implementation_preferences = ('.ps1', '.exe', '')
self.allow_executable = False
# The connection is created by running ssh/scp/sftp from the exec_command,
# put_file, and fetch_file methods, so we don't need to do any connection
# management here.
def _connect(self) -> Connection:
return self
@staticmethod
def _create_control_path(
host: str | None,
port: int | None,
user: str | None,
connection: ConnectionBase | None = None,
pid: int | None = None,
) -> str:
"""Make a hash for the controlpath based on con attributes"""
pstring = '%s-%s-%s' % (host, port, user)
if connection:
pstring += '-%s' % connection
if pid:
pstring += '-%s' % to_text(pid)
m = hashlib.sha1()
m.update(to_bytes(pstring))
digest = m.hexdigest()
cpath = '%(directory)s/' + digest[:10]
return cpath
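# Worked example (exposition): _create_control_path('10.0.0.1', 22, 'root')
# hashes '10.0.0.1-22-root' with sha1 and returns '%(directory)s/' plus the
# first 10 hex digits of the digest; '%(directory)s' is substituted later
# from the control_path_dir setting.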
@staticmethod
def _sshpass_available() -> bool:
global SSHPASS_AVAILABLE
# We test once if sshpass is available, and remember the result. It
# would be nice to use distutils.spawn.find_executable for this, but
# distutils isn't always available; shutil.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(b_command: list[bytes]) -> tuple[bool, bool]:
"""
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
but for now we do it the simple way.
"""
controlpersist = False
controlpath = False
for b_arg in (a.lower() for a in b_command):
if b'controlpersist' in b_arg:
controlpersist = True
elif b'controlpath' in b_arg:
controlpath = True
return controlpersist, controlpath
def _add_args(self, b_command: list[bytes], b_args: t.Iterable[bytes], explanation: str) -> None:
"""
Adds arguments to the ssh command and displays a caller-supplied explanation of why.
:arg b_command: A list containing the command to add the new arguments to.
This list will be modified by this method.
:arg b_args: An iterable of new arguments to add. This iterable is used
more than once so it must be persistent (ie: a list is okay but a
StringIO would not be)
:arg explanation: A text string explaining why the arguments
were added. It will be displayed when the verbosity is high enough.
.. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
"""
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host)
b_command += b_args
def _build_command(self, binary: str, subsystem: str, *other_args: bytes | str) -> list[bytes]:
"""
Takes an executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command
wrapped in local ssh shell commands and ready for execution.
:arg binary: actual executable to use to execute command.
:arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have diff names.
:arg other_args: any additional arguments to pass through to the ssh binary
"""
b_command = []
conn_password = self.get_option('password') or self._play_context.password
#
# First, the command to invoke
#
# If we want to use password authentication, we have to set up a pipe to
# write the password to sshpass.
pkcs11_provider = self.get_option("pkcs11_provider")
if conn_password or pkcs11_provider:
if not self._sshpass_available():
raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program")
if not conn_password and pkcs11_provider:
raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
self.sshpass_pipe = os.pipe()
b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
password_prompt = self.get_option('sshpass_prompt')
if not password_prompt and pkcs11_provider:
# Set default password prompt for pkcs11_provider to make it clear it's a PIN
password_prompt = 'Enter PIN for '
if password_prompt:
b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
#
# pkcs11 mode allows the use of Smartcards or Yubikey devices
if conn_password and pkcs11_provider:
self._add_args(b_command,
(b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=publickey",
b"-o", b"PasswordAuthentication=no",
b'-o', to_bytes(u'PKCS11Provider=%s' % pkcs11_provider)),
u'Enable pkcs11')
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
b_args: t.Iterable[bytes]
if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
if conn_password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
b_command += [b'-b', b'-']
if display.verbosity:
b_command.append(b'-' + (b'v' * display.verbosity))
# Next, we add ssh_args
ssh_args = self.get_option('ssh_args')
if ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
# Now we add various arguments that have their own specific settings defined in docs above.
if self.get_option('host_key_checking') is False:
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
self.port = self.get_option('port')
if self.port is not None:
b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
key = self.get_option('private_key_file')
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
if not conn_password:
self._add_args(
b_command, (
b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
b"-o", b"PasswordAuthentication=no"
),
u"ansible_password/ansible_ssh_password not set"
)
self.user = self.get_option('remote_user')
if self.user:
self._add_args(
b_command,
(b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
timeout = self.get_option('timeout')
self._add_args(
b_command,
(b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
u"ANSIBLE_TIMEOUT/timeout set"
)
# Add in any common or binary-specific arguments from the PlayContext
# (i.e. inventory or task settings or overrides on the command line).
for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)):
attr = self.get_option(opt)
if attr is not None:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
self._add_args(b_command, b_args, u"Set %s" % opt)
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
# already been set.
controlpersist, controlpath = self._persistence_controls(b_command)
if controlpersist:
self._persistent = True
if not controlpath:
self.control_path_dir = self.get_option('control_path_dir')
cpdir = unfrackpath(self.control_path_dir)
b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
# The directory must exist and be writable.
makedirs_safe(b_cpdir, 0o700)
if not os.access(b_cpdir, os.W_OK):
raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
self.control_path = self.get_option('control_path')
if not self.control_path:
self.control_path = self._create_control_path(
self.host,
self.port,
self.user
)
b_args = (b"-o", b'ControlPath="%s"' % to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
# Finally, we add any caller-supplied extras.
if other_args:
b_command += [to_bytes(a) for a in other_args]
return b_command
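# Example of a composed command line (exposition; the exact flags depend on
# the options in effect). With a connection password, port 2222 and the
# default ssh_args, _build_command('ssh', 'ssh', host) yields roughly:
#   sshpass -d<fd> ssh -C -o ControlMaster=auto -o ControlPersist=60s
#     -o Port=2222 -o ConnectTimeout=10 -o ControlPath=<dir>/<hash> <host>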
def _send_initial_data(self, fh: io.IOBase, in_data: bytes, ssh_process: subprocess.Popen) -> None:
"""
Writes initial data to the stdin filehandle of the subprocess and closes
it. (The handle must be closed; otherwise, for example, "sftp -b -" will
just hang forever waiting for more commands.)
"""
display.debug(u'Sending initial data')
try:
fh.write(to_bytes(in_data))
fh.close()
except (OSError, IOError) as e:
# The ssh connection may have already terminated at this point, with a more useful error
# Only raise AnsibleConnectionFailure if the ssh process is still alive
time.sleep(0.001)
ssh_process.poll()
if getattr(ssh_process, 'returncode', None) is None:
raise AnsibleConnectionFailure(
'Data could not be sent to remote host "%s". Make sure this host can be reached '
'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
)
display.debug(u'Sent initial data (%d bytes)' % len(in_data))
# Used by _run() to kill processes on failures
@staticmethod
def _terminate_process(p: subprocess.Popen) -> None:
""" Terminate a process, ignoring errors """
try:
p.terminate()
except (OSError, IOError):
pass
# This is separate from _run() because we need to do the same thing for stdout
# and stderr.
def _examine_output(self, source: str, state: str, b_chunk: bytes, sudoable: bool) -> tuple[bytes, bytes]:
"""
Takes a string, extracts complete lines from it, tests to see if they
are a prompt, error message, etc., and sets appropriate flags in self.
Prompt and success lines are removed.
Returns the processed (i.e. possibly-edited) output and the unprocessed
remainder (to be processed with the next chunk) as strings.
"""
output = []
for b_line in b_chunk.splitlines(True):
display_line = to_text(b_line).rstrip('\r\n')
suppress_output = False
# display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
if SSH_DEBUG.match(display_line):
# skip lines from ssh debug output to avoid false matches
pass
elif self.become.expect_prompt() and self.become.check_password_prompt(b_line):
display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_prompt'] = True
suppress_output = True
elif self.become.success and self.become.check_success(b_line):
display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_success'] = True
suppress_output = True
elif sudoable and self.become.check_incorrect_password(b_line):
display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_error'] = True
elif sudoable and self.become.check_missing_password(b_line):
display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_nopasswd_error'] = True
if not suppress_output:
output.append(b_line)
# The chunk we read was most likely a series of complete lines, but just
# in case the last line was incomplete (and not a prompt, which we would
# have removed from the output), we retain it to be processed with the
# next chunk.
remainder = b''
if output and not output[-1].endswith(b'\n'):
remainder = output[-1]
output = output[:-1]
return b''.join(output), remainder
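# Example (exposition): if a chunk arrives as
#   b'[sudo] password for demo:\nLinux\n'
# and the become plugin recognizes the first line as its prompt, that line
# is suppressed, self._flags['become_prompt'] is set, and only b'Linux\n'
# is passed through; an incomplete trailing line is handed back as the
# remainder to be processed with the next chunk.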
def _bare_run(self, cmd: list[bytes], in_data: bytes | None, sudoable: bool = True, checkrc: bool = True) -> tuple[int, bytes, bytes]:
"""
Starts the command and communicates with it until it ends.
"""
# We don't use _shell.quote as this is run on the controller and independent from the shell plugin chosen
display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd)
display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
# pipelining data, or can't create a pty, we fall back to using plain
# old pipes.
p = None
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = list(map(to_bytes, cmd))
conn_password = self.get_option('password') or self._play_context.password
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
master, slave = pty.openpty()
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'wb', 0)
os.close(slave)
except (OSError, IOError):
p = None
if not p:
try:
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdin = p.stdin # type: ignore[assignment] # stdin will be set and not None due to the calls above
except (OSError, IOError) as e:
raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
# If we are using SSH password authentication, write the password into
# the pipe we opened in _build_command.
if conn_password:
os.close(self.sshpass_pipe[0])
try:
os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
except OSError as e:
# Ignore broken pipe errors if the sshpass process has exited.
if e.errno != errno.EPIPE or p.poll() is None:
raise
os.close(self.sshpass_pipe[1])
#
# SSH state machine
#
# Now we read and accumulate output from the running process until it
# exits. Depending on the circumstances, we may also need to write an
# escalation password and/or pipelined input to the process.
states = [
'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
]
# Are we requesting privilege escalation? Right now, we may be invoked
# to execute sftp/scp with sudoable=True, but we can request escalation
# only when using ssh. Otherwise we can send initial data straightaway.
state = states.index('ready_to_send')
if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
prompt = getattr(self.become, 'prompt', None)
if prompt:
# We're requesting escalation with a password, so we have to
# wait for a password prompt.
state = states.index('awaiting_prompt')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt)))
elif self.become and self.become.success:
# We're requesting escalation without a password, so we have to
# detect success/failure before sending any initial data.
state = states.index('awaiting_escalation')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success)))
# We store accumulated stdout and stderr output from the process here,
# but strip any privilege escalation prompt/confirmation lines first.
# Output is accumulated into tmp_*, complete lines are extracted into
# an array, then checked and removed or copied to stdout or stderr. We
# set any flags based on examining the output in self._flags.
b_stdout = b_stderr = b''
b_tmp_stdout = b_tmp_stderr = b''
self._flags = dict(
become_prompt=False, become_success=False,
become_error=False, become_nopasswd_error=False
)
# select timeout should be longer than the connect timeout, otherwise
# they will race each other when we can't connect, and the connect
# timeout usually fails
timeout = 2 + self.get_option('timeout')
for fd in (p.stdout, p.stderr):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# TODO: bcoca would like to use SelectSelector() when open
# select is faster when the number of open filehandles is low and we only ever handle 1.
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
# If we can send initial data without waiting for anything, we do so
# before we start polling
if states[state] == 'ready_to_send' and in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
try:
while True:
poll = p.poll()
events = selector.select(timeout)
# We pay attention to timeouts only while negotiating a prompt.
if not events:
# We timed out
if state <= states.index('awaiting_escalation'):
# If the process has already exited, then it's not really a
# timeout; we'll let the normal error handling deal with it.
if poll is not None:
break
self._terminate_process(p)
raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
display.vvvvv(f'SSH: Timeout ({timeout}s) waiting for the output', host=self.host)
# Read whatever output is available on stdout and stderr, and stop
# listening to the pipe if it's been closed.
for key, event in events:
if key.fileobj == p.stdout:
b_chunk = p.stdout.read()
if b_chunk == b'':
# stdout has been closed, stop watching it
selector.unregister(p.stdout)
# When ssh has ControlMaster (+ControlPath/Persist) enabled, the
# first connection goes into the background and we never see EOF
# on stderr. If we see EOF on stdout, lower the select timeout
# to reduce the time wasted selecting on stderr if we observe
# that the process has not yet exited after this EOF. Otherwise
# we may spend a long timeout period waiting for an EOF that is
# not going to arrive until the persisted connection closes.
timeout = 1
b_tmp_stdout += b_chunk
display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
elif key.fileobj == p.stderr:
b_chunk = p.stderr.read()
if b_chunk == b'':
# stderr has been closed, stop watching it
selector.unregister(p.stderr)
b_tmp_stderr += b_chunk
display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
# We examine the output line-by-line until we have negotiated any
# privilege escalation prompt and subsequent success/error message.
# Afterwards, we can accumulate output without looking at it.
if state < states.index('ready_to_send'):
if b_tmp_stdout:
b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
b_stdout += b_output
b_tmp_stdout = b_unprocessed
if b_tmp_stderr:
b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
b_stderr += b_output
b_tmp_stderr = b_unprocessed
else:
b_stdout += b_tmp_stdout
b_stderr += b_tmp_stderr
b_tmp_stdout = b_tmp_stderr = b''
# If we see a privilege escalation prompt, we send the password.
# (If we're expecting a prompt but the escalation succeeds, we
# didn't need the password and can carry on regardless.)
if states[state] == 'awaiting_prompt':
if self._flags['become_prompt']:
display.debug(u'Sending become_password in response to prompt')
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
# On python3 stdin is a BufferedWriter, and we don't have a guarantee
# that the write will happen without a flush
stdin.flush()
self._flags['become_prompt'] = False
state += 1
elif self._flags['become_success']:
state += 1
# We've requested escalation (with or without a password), now we
# wait for an error message or a successful escalation.
if states[state] == 'awaiting_escalation':
if self._flags['become_success']:
display.vvv(u'Escalation succeeded', host=self.host)
self._flags['become_success'] = False
state += 1
elif self._flags['become_error']:
display.vvv(u'Escalation failed', host=self.host)
self._terminate_process(p)
self._flags['become_error'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
elif self._flags['become_nopasswd_error']:
display.vvv(u'Escalation requires password', host=self.host)
self._terminate_process(p)
self._flags['become_nopasswd_error'] = False
raise AnsibleError('Missing %s password' % self.become.name)
elif self._flags['become_prompt']:
# This shouldn't happen, because we should see the "Sorry,
# try again" message first.
display.vvv(u'Escalation prompt repeated', host=self.host)
self._terminate_process(p)
self._flags['become_prompt'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
# Once we're sure that the privilege escalation prompt, if any, has
# been dealt with, we can send any initial data and start waiting
# for output.
if states[state] == 'ready_to_send':
if in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
# Now we're awaiting_exit: has the child process exited? If it has,
# and we've read all available output from it, we're done.
if poll is not None:
if not selector.get_map() or not events:
break
# We should not see further writes to the stdout/stderr file
# descriptors after the process has closed, set the select
# timeout to gather any last writes we may have missed.
timeout = 0
continue
# If the process has not yet exited, but we've already read EOF from
# its stdout and stderr (and thus no longer watching any file
# descriptors), we can just wait for it to exit.
elif not selector.get_map():
p.wait()
break
# Otherwise there may still be outstanding data to read.
finally:
selector.close()
# close stdin, stdout, and stderr after process is terminated and
# stdout/stderr are read completely (see also issues #848, #64768).
stdin.close()
p.stdout.close()
p.stderr.close()
if self.get_option('host_key_checking'):
if cmd[0] == b"sshpass" and p.returncode == 6:
raise AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
'(or ssh_args in [ssh_connection] section of the config file) before running again')
# If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
# we raise a special exception so that we can retry a connection.
controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
if p.returncode == 255:
additional = to_native(b_stderr)
if controlpersist_broken_pipe:
raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional)
elif in_data and checkrc:
raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s'
% (self.host, additional))
return (p.returncode, b_stdout, b_stderr)
@_ssh_retry
def _run(self, cmd: list[bytes], in_data: bytes | None, sudoable: bool = True, checkrc: bool = True) -> tuple[int, bytes, bytes]:
"""Wrapper around _bare_run that retries the connection
"""
return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc)
@_ssh_retry
def _file_transport_command(self, in_path: str, out_path: str, sftp_action: str) -> tuple[int, bytes, bytes]:
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
smart_methods = ['sftp', 'scp', 'piped']
# Windows does not support dd so we cannot use the piped method
if getattr(self._shell, "_IS_WINDOWS", False):
smart_methods.remove('piped')
# Transfer methods to try
methods = []
# Use the transfer_method option if set
ssh_transfer_method = self.get_option('ssh_transfer_method')
if ssh_transfer_method == 'smart':
methods = smart_methods
else:
methods = [ssh_transfer_method]
for method in methods:
returncode = stdout = stderr = None
if method == 'sftp':
cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host))
in_data = u"{0} {1} {2}\n".format(sftp_action, shlex.quote(in_path), shlex.quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'scp':
scp = self.get_option('scp_executable')
if sftp_action == 'get':
cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
else:
cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
in_data = None
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'piped':
if sftp_action == 'get':
# we pass sudoable=False to disable pty allocation, which
# would end up mixing stdout/stderr and screwing with newlines
(returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (self._shell.quote(in_path), BUFSIZE), sudoable=False)
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
out_file.write(stdout)
else:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f:
in_data = to_bytes(f.read(), nonstring='passthru')
if not in_data:
count = ' count=0'
else:
count = ''
(returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False)
# Check the return code and rollover to next method if failed
if returncode == 0:
return (returncode, stdout, stderr)
else:
# If not in smart mode, the data will be printed by the raise below
if len(methods) > 1:
display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
display.debug(u'%s' % to_text(stdout))
display.debug(u'%s' % to_text(stderr))
if returncode == 255:
raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
else:
raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
def _escape_win_path(self, path: str) -> str:
""" converts a Windows path to one that's supported by SFTP and SCP """
# If using a root path then we need to start with /
prefix = ""
if re.match(r'^\w{1}:', path):
prefix = "/"
# Convert all '\' to '/'
return "%s%s" % (prefix, path.replace("\\", "/"))
#
# Main public methods
#
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
""" run a command on the remote host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
self.host = self.get_option('host') or self._play_context.remote_addr
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self.host)
if getattr(self._shell, "_IS_WINDOWS", False):
# Become method 'runas' is done in the wrapper that is executed,
# need to disable sudoable so the bare_run is not waiting for a
# prompt that will not occur
sudoable = False
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_executable = self.get_option('ssh_executable')
# -tt can cause various issues in some environments so allow the user
# to disable it as a troubleshooting method.
use_tty = self.get_option('use_tty')
args: tuple[str, ...]
if not in_data and sudoable and use_tty:
args = ('-tt', self.host, cmd)
else:
args = (self.host, cmd)
cmd = self._build_command(ssh_executable, 'ssh', *args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
# When running on Windows, stderr may contain CLIXML encoded output
if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
stderr = _parse_clixml(stderr)
return (returncode, stdout, stderr)
def put_file(self, in_path: str, out_path: str) -> tuple[int, bytes, bytes]: # type: ignore[override] # Used by tests and would break API
""" transfer a file from local to remote """
super(Connection, self).put_file(in_path, out_path)
self.host = self.get_option('host') or self._play_context.remote_addr
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
if getattr(self._shell, "_IS_WINDOWS", False):
out_path = self._escape_win_path(out_path)
return self._file_transport_command(in_path, out_path, 'put')
def fetch_file(self, in_path: str, out_path: str) -> tuple[int, bytes, bytes]: # type: ignore[override] # Used by tests and would break API
""" fetch a file from remote to local """
super(Connection, self).fetch_file(in_path, out_path)
self.host = self.get_option('host') or self._play_context.remote_addr
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# need to add / if path is rooted
if getattr(self._shell, "_IS_WINDOWS", False):
in_path = self._escape_win_path(in_path)
return self._file_transport_command(in_path, out_path, 'get')
def reset(self) -> None:
run_reset = False
self.host = self.get_option('host') or self._play_context.remote_addr
# If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
# only run the reset if the ControlPath already exists or if it isn't configured and ControlPersist is set
# 'check' will determine this.
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host)
display.vvv(u'sending connection check: %s' % to_text(cmd), host=self.host)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.vvv(u"No connection to reset: %s" % to_text(stderr), host=self.host)
else:
run_reset = True
if run_reset:
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host)
display.vvv(u'sending connection stop: %s' % to_text(cmd), host=self.host)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.warning(u"Failed to reset connection:%s" % to_text(stderr))
self.close()
def close(self) -> None:
self._connected = False
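# --- Illustrative sketch (appended for exposition; not part of the upstream
# module). Exercises two pure helpers defined above, assuming ansible-core
# is importable; both results follow directly from the code shown earlier.
if __name__ == '__main__':
    print(Connection._split_ssh_args('-o Foo=1 -o Bar="foo bar"'))
    # -> ['-o', 'Foo=1', '-o', 'Bar=foo bar']
    print(Connection._create_control_path('10.0.0.1', 22, 'root'))
    # -> '%(directory)s/' followed by the first 10 hex digits of
    #    sha1(b'10.0.0.1-22-root')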
| 62,440 | Python | .py | 1,213 | 38.507832 | 157 | 0.580289 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,407 | __init__.py | ansible_ansible/lib/ansible/plugins/connection/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017, Peter Sprygada <psprygad@redhat.com>
# (c) 2017 Ansible Project
from __future__ import annotations
import collections.abc as c
import fcntl
import io
import os
import shlex
import typing as t
from abc import abstractmethod
from functools import wraps
from ansible import constants as C
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.playbook.play_context import PlayContext
from ansible.plugins import AnsiblePlugin
from ansible.plugins.become import BecomeBase
from ansible.plugins.shell import ShellBase
from ansible.utils.display import Display
from ansible.plugins.loader import connection_loader, get_shell_plugin
from ansible.utils.path import unfrackpath
display = Display()
__all__ = ['ConnectionBase', 'ensure_connect']
BUFSIZE = 65536
P = t.ParamSpec('P')
T = t.TypeVar('T')
def ensure_connect(
func: c.Callable[t.Concatenate[ConnectionBase, P], T],
) -> c.Callable[t.Concatenate[ConnectionBase, P], T]:
@wraps(func)
def wrapped(self: ConnectionBase, *args: P.args, **kwargs: P.kwargs) -> T:
if not self._connected:
self._connect()
return func(self, *args, **kwargs)
return wrapped
class ConnectionBase(AnsiblePlugin):
"""
A base class for connections to contain common code.
"""
has_pipelining = False
has_native_async = False # eg, winrm
always_pipeline_modules = False # eg, winrm
has_tty = True # for interacting with become plugins
# When running over this connection type, prefer modules written in a certain language
# as discovered by the specified file extension. An empty string as the
# language means any language.
module_implementation_preferences = ('',) # type: t.Iterable[str]
allow_executable = True
# the following control whether or not the connection supports the
# persistent connection framework or not
supports_persistence = False
force_persistence = False
default_user: str | None = None
def __init__(
self,
play_context: PlayContext,
new_stdin: io.TextIOWrapper | None = None,
shell: ShellBase | None = None,
*args: t.Any,
**kwargs: t.Any,
) -> None:
super(ConnectionBase, self).__init__()
# All these hasattrs allow subclasses to override these parameters
if not hasattr(self, '_play_context'):
# Backwards compat: self._play_context isn't really needed, using set_options/get_option
self._play_context = play_context
# Delete once the deprecation period is over for WorkerProcess._new_stdin
if not hasattr(self, '__new_stdin'):
self.__new_stdin = new_stdin
if not hasattr(self, '_display'):
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
self.success_key = None
self.prompt = None
self._connected = False
self._socket_path: str | None = None
# helper plugins
self._shell = shell
# we must always have a shell
if not self._shell:
shell_type = play_context.shell if play_context.shell else getattr(self, '_shell_type', None)
self._shell = get_shell_plugin(shell_type=shell_type, executable=self._play_context.executable)
self.become: BecomeBase | None = None
@property
def _new_stdin(self) -> io.TextIOWrapper | None:
display.deprecated(
"The connection's stdin object is deprecated. "
"Call display.prompt_until(msg) instead.",
version='2.19',
)
return self.__new_stdin
def set_become_plugin(self, plugin: BecomeBase) -> None:
self.become = plugin
@property
def connected(self) -> bool:
"""Read-only property holding whether the connection to the remote host is active or closed."""
return self._connected
@property
def socket_path(self) -> str | None:
"""Read-only property holding the connection socket path for this remote host"""
return self._socket_path
@staticmethod
def _split_ssh_args(argstring: str) -> list[str]:
"""
Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
the argument list. The list will not contain any empty elements.
"""
# In Python3, shlex.split doesn't work on a byte string.
return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
@property
@abstractmethod
def transport(self) -> str:
"""String used to identify this Connection class from other classes"""
pass
@abstractmethod
def _connect(self: T) -> T:
"""Connect to the host we've been initialized with"""
@ensure_connect
@abstractmethod
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
"""Run a command on the remote host.
:arg cmd: byte string containing the command
:kwarg in_data: If set, this data is passed to the command's stdin.
This is used to implement pipelining. Currently not all
connection plugins implement pipelining.
:kwarg sudoable: Tell the connection plugin if we're executing
a command via a privilege escalation mechanism. This may affect
how the connection plugin returns data. Note that not all
connections can handle privilege escalation.
:returns: a tuple of (return code, stdout, stderr) The return code is
an int while stdout and stderr are both byte strings.
When a command is executed, it goes through multiple commands to get
there. It looks approximately like this::
[LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
:LocalShell: Is optional. It is run locally to invoke the
``Connection Command``. In most instances, the
``ConnectionCommand`` can be invoked directly instead. The ssh
connection plugin which can have values that need expanding
locally specified via ssh_args is the sole known exception to
this. Shell metacharacters in the command itself should be
processed on the remote machine, not on the local machine so no
shell is needed on the local machine. (Example, ``/bin/sh``)
:ConnectionCommand: This is the command that connects us to the remote
machine to run the rest of the command. ``ansible_user``,
``ansible_ssh_host`` and so forth are fed to this piece of the
command to connect to the correct host (Examples ``ssh``,
``chroot``)
:UsersLoginShell: This shell may or may not be created depending on
the ConnectionCommand used by the connection plugin. This is the
shell that the ``ansible_user`` has configured as their login
shell. In traditional UNIX parlance, this is the last field of
a user's ``/etc/passwd`` entry. We do not specifically try to run
the ``UsersLoginShell`` when we connect. Instead it is implicit
in the actions that the ``ConnectionCommand`` takes when it
connects to a remote machine. ``ansible_shell_type`` may be set
to inform ansible of differences in how the ``UsersLoginShell``
handles things like quoting if a shell has different semantics
than the Bourne shell.
:ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
``ansible_shell_executable`` or via
``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
We explicitly invoke this shell so that we have predictable
quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only
settable by the user because some sudo setups may only allow
invoking a specific shell. (For instance, ``/bin/bash`` may be
allowed but ``/bin/sh``, our default, may not). We invoke this
twice, once after the ``ConnectionCommand`` and once after the
``BecomeCommand``. After the ConnectionCommand, this is run by
the ``UsersLoginShell``. After the ``BecomeCommand`` we specify
that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
:BecomeCommand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
privilege escalation. Setting this up is performed by the action
plugin prior to running ``exec_command``, so we are simply passed
:param:`cmd` with the BecomeCommand already added.
(Examples: sudo, su) If we have a BecomeCommand then we will
invoke an ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we
have a consistent view of quoting.
:Command: Is the command we're actually trying to run remotely.
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
"""
pass
@ensure_connect
@abstractmethod
def put_file(self, in_path: str, out_path: str) -> None:
"""Transfer a file from local to remote"""
pass
@ensure_connect
@abstractmethod
def fetch_file(self, in_path: str, out_path: str) -> None:
"""Fetch a file from remote to local; callers are expected to have pre-created the directory chain for out_path"""
pass
@abstractmethod
def close(self) -> None:
"""Terminate the connection"""
pass
def connection_lock(self) -> None:
f = self._play_context.connection_lockfd
display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
fcntl.lockf(f, fcntl.LOCK_EX)
display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
def connection_unlock(self) -> None:
f = self._play_context.connection_lockfd
fcntl.lockf(f, fcntl.LOCK_UN)
display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
def reset(self) -> None:
display.warning("Reset is not implemented for this connection")
def update_vars(self, variables: dict[str, t.Any]) -> None:
"""
Adds 'magic' variables relating to connections to the variable dictionary provided.
This is a legacy from runner, kept so users can access these values from the play.
"""
for varname in C.COMMON_CONNECTION_VARS:
value = None
if varname in variables:
# don't update existing values
continue
elif 'password' in varname or 'passwd' in varname:
# no secrets!
continue
elif varname == 'ansible_connection':
# it's me, mom!
value = self._load_name
elif varname == 'ansible_shell_type' and self._shell:
# it's my cousin ...
value = self._shell._load_name
else:
# deal with generic options if the plugin supports them (for example, not all connections have a remote user)
options = C.config.get_plugin_options_from_var('connection', self._load_name, varname)
if options:
value = self.get_option(options[0]) # for these variables there should be only one option
elif 'become' not in varname:
# fall back to play_context, unless become related. TODO: in the end, this should come from task/play and not pc
for prop, var_list in C.MAGIC_VARIABLE_MAPPING.items():
if varname in var_list:
try:
value = getattr(self._play_context, prop)
break
except AttributeError:
# It was not defined; fine to ignore
continue
if value is not None:
display.debug('Set connection var {0} to {1}'.format(varname, value))
variables[varname] = value
class NetworkConnectionBase(ConnectionBase):
"""
A base class for network-style connections.
"""
force_persistence = True
# Do not use _remote_is_local in other connections
_remote_is_local = True
def __init__(
self,
play_context: PlayContext,
new_stdin: io.TextIOWrapper | None = None,
*args: t.Any,
**kwargs: t.Any,
) -> None:
super(NetworkConnectionBase, self).__init__(play_context, new_stdin, *args, **kwargs)
self._messages: list[tuple[str, str]] = []
self._conn_closed = False
self._network_os = self._play_context.network_os
self._local = connection_loader.get('local', play_context, '/dev/null')
self._local.set_options()
self._sub_plugin: dict[str, t.Any] = {}
self._cached_variables = (None, None, None)
# reconstruct the socket_path and set instance values accordingly
self._ansible_playbook_pid = kwargs.get('ansible_playbook_pid')
self._update_connection_state()
def __getattr__(self, name):
try:
return self.__dict__[name]
except KeyError:
if not name.startswith('_'):
plugin = self._sub_plugin.get('obj')
if plugin:
method = getattr(plugin, name, None)
if method is not None:
return method
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
return self._local.exec_command(cmd, in_data, sudoable)
def queue_message(self, level: str, message: str) -> None:
"""
Adds a message to the queue of messages waiting to be pushed back to the controller process.
:arg level: A string which can either be the name of a method in display, or 'log'. When
the messages are returned to task_executor, a value of 'log' will correspond to
``display.display(message, log_only=True)``, while any other value will call ``display.[level](message)``.
"""
self._messages.append((level, message))
def pop_messages(self) -> list[tuple[str, str]]:
messages, self._messages = self._messages, []
return messages
def put_file(self, in_path: str, out_path: str) -> None:
"""Transfer a file from local to remote"""
return self._local.put_file(in_path, out_path)
def fetch_file(self, in_path: str, out_path: str) -> None:
"""Fetch a file from remote to local"""
return self._local.fetch_file(in_path, out_path)
def reset(self) -> None:
"""
Reset the connection
"""
if self._socket_path:
self.queue_message('vvvv', 'resetting persistent connection for socket_path %s' % self._socket_path)
self.close()
self.queue_message('vvvv', 'reset call on connection instance')
def close(self) -> None:
self._conn_closed = True
if self._connected:
self._connected = False
def set_options(
self,
task_keys: dict[str, t.Any] | None = None,
var_options: dict[str, t.Any] | None = None,
direct: dict[str, t.Any] | None = None,
) -> None:
super(NetworkConnectionBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
if self.get_option('persistent_log_messages'):
warning = "Persistent connection logging is enabled for %s. This will log ALL interactions" % self._play_context.remote_addr
logpath = getattr(C, 'DEFAULT_LOG_PATH')
if logpath is not None:
warning += " to %s" % logpath
self.queue_message('warning', "%s and WILL NOT redact sensitive configuration like passwords. USE WITH CAUTION!" % warning)
if self._sub_plugin.get('obj') and self._sub_plugin.get('type') != 'external':
try:
self._sub_plugin['obj'].set_options(task_keys=task_keys, var_options=var_options, direct=direct)
except AttributeError:
pass
def _update_connection_state(self) -> None:
"""
Reconstruct the connection socket_path and check if it exists.
If the socket path exists then the connection is active: set
_socket_path to the path and _connected to True. If the socket
path doesn't exist, leave _socket_path as None and _connected
as False.
"""
ssh = connection_loader.get('ssh', class_only=True)
control_path = ssh._create_control_path(
self._play_context.remote_addr, self._play_context.port,
self._play_context.remote_user, self._play_context.connection,
self._ansible_playbook_pid
)
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
socket_path = unfrackpath(control_path % dict(directory=tmp_path))
if os.path.exists(socket_path):
self._connected = True
self._socket_path = socket_path
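# Illustrative result (assuming the default C.PERSISTENT_CONTROL_PATH_DIR
# of ~/.ansible/pc): socket_path ends up looking something like
# ~/.ansible/pc/<hash derived from host, port, user and playbook pid>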
def _log_messages(self, message: str) -> None:
if self.get_option('persistent_log_messages'):
self.queue_message('log', message)
| 17,947 | Python | .py | 352 | 41.289773 | 140 | 0.636452 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,408 | psrp.py | ansible_ansible/lib/ansible/plugins/connection/psrp.py |
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
author: Ansible Core Team
name: psrp
short_description: Run tasks over Microsoft PowerShell Remoting Protocol
description:
- Run commands or put/fetch on a target via PSRP (WinRM plugin)
- This is similar to the P(ansible.builtin.winrm#connection) connection plugin which uses the same
underlying transport but instead runs in a PowerShell interpreter.
version_added: "2.7"
requirements:
- pypsrp>=0.4.0, <1.0.0 (Python library)
extends_documentation_fragment:
- connection_pipelining
options:
# transport options
remote_addr:
description:
- The hostname or IP address of the remote host.
default: inventory_hostname
type: str
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_psrp_host
remote_user:
description:
- The user to log in as.
type: str
vars:
- name: ansible_user
- name: ansible_psrp_user
keyword:
- name: remote_user
remote_password:
description: Authentication password for the O(remote_user). Can be supplied as CLI option.
type: str
vars:
- name: ansible_password
- name: ansible_winrm_pass
- name: ansible_winrm_password
aliases:
- password # Needed for --ask-pass to come through on delegation
port:
description:
- The port for PSRP to connect on the remote target.
- Default is V(5986) if O(protocol) is not defined or is V(https),
otherwise the port is V(5985).
type: int
vars:
- name: ansible_port
- name: ansible_psrp_port
keyword:
- name: port
protocol:
description:
- Set the protocol to use for the connection.
- Default is V(https) if O(port) is not defined or O(port) is not V(5985).
choices:
- http
- https
type: str
vars:
- name: ansible_psrp_protocol
path:
description:
- The URI path to connect to.
type: str
vars:
- name: ansible_psrp_path
default: 'wsman'
auth:
description:
- The authentication protocol to use when authenticating the remote user.
- The default, V(negotiate), will attempt to use Kerberos (V(kerberos)) if it is
available and fall back to NTLM (V(ntlm)) if it isn't.
type: str
vars:
- name: ansible_psrp_auth
choices:
- basic
- certificate
- negotiate
- kerberos
- ntlm
- credssp
default: negotiate
cert_validation:
description:
- Whether to validate the remote server's certificate or not.
- Set to V(ignore) to not validate any certificates.
- O(ca_cert) can be set to the path of a PEM certificate chain to
use in the validation.
choices:
- validate
- ignore
default: validate
type: str
vars:
- name: ansible_psrp_cert_validation
ca_cert:
description:
- The path to a PEM certificate chain to use when validating the server's
certificate.
- This value is ignored if O(cert_validation) is set to V(ignore).
type: path
vars:
- name: ansible_psrp_cert_trust_path
- name: ansible_psrp_ca_cert
aliases: [ cert_trust_path ]
connection_timeout:
description:
- The connection timeout for making the request to the remote host.
- This is measured in seconds.
type: int
vars:
- name: ansible_psrp_connection_timeout
default: 30
read_timeout:
description:
- The read timeout for receiving data from the remote host.
- This value must always be greater than O(operation_timeout).
- This option requires pypsrp >= 0.3.
- This is measured in seconds.
type: int
vars:
- name: ansible_psrp_read_timeout
default: 30
version_added: '2.8'
reconnection_retries:
description:
- The number of retries on connection errors.
type: int
vars:
- name: ansible_psrp_reconnection_retries
default: 0
version_added: '2.8'
reconnection_backoff:
description:
- The backoff time to use in between reconnection attempts.
(First sleeps X, then sleeps 2*X, then sleeps 4*X, ...)
- This is measured in seconds.
- The C(ansible_psrp_reconnection_backoff) variable was added in Ansible
2.9.
type: int
vars:
- name: ansible_psrp_connection_backoff
- name: ansible_psrp_reconnection_backoff
default: 2
version_added: '2.8'
message_encryption:
description:
- Controls the message encryption settings; this is different from TLS
encryption when O(protocol) is V(https).
- Only the auth protocols V(negotiate), V(kerberos), V(ntlm), and
V(credssp) can do message encryption. The other authentication protocols
only support encryption when V(protocol) is set to V(https).
- V(auto) means message encryption is only used when not using
TLS/HTTPS.
- V(always) is the same as V(auto) but message encryption is always used
even when running over TLS/HTTPS.
- V(never) disables any encryption checks that are in place when running
over HTTP and disables any authentication encryption processes.
type: str
vars:
- name: ansible_psrp_message_encryption
choices:
- auto
- always
- never
default: auto
proxy:
description:
- Set the proxy URL to use when connecting to the remote host.
vars:
- name: ansible_psrp_proxy
type: str
ignore_proxy:
description:
- Will disable any environment proxy settings and connect directly to the
remote host.
- This option is ignored if O(proxy) is set.
vars:
- name: ansible_psrp_ignore_proxy
type: bool
default: false
# auth options
certificate_key_pem:
description:
- The local path to an X509 certificate key to use with certificate auth.
type: path
vars:
- name: ansible_psrp_certificate_key_pem
certificate_pem:
description:
- The local path to an X509 certificate to use with certificate auth.
type: path
vars:
- name: ansible_psrp_certificate_pem
credssp_auth_mechanism:
description:
- The sub authentication mechanism to use with CredSSP auth.
- When V(auto), both Kerberos and NTLM are attempted, with Kerberos being
preferred.
type: str
choices:
- auto
- kerberos
- ntlm
default: auto
vars:
- name: ansible_psrp_credssp_auth_mechanism
credssp_disable_tlsv1_2:
description:
- Disables the use of TLSv1.2 on the CredSSP authentication channel.
- This should not be set to V(yes) unless dealing with a host that does not
have TLSv1.2.
default: false
type: bool
vars:
- name: ansible_psrp_credssp_disable_tlsv1_2
credssp_minimum_version:
description:
- The minimum CredSSP server authentication version that will be accepted.
- Set to V(5) to ensure the server has been patched and is not vulnerable
to CVE-2018-0886.
default: 2
type: int
vars:
- name: ansible_psrp_credssp_minimum_version
negotiate_delegate:
description:
- Allow the remote user the ability to delegate its credentials to another
server, i.e. credential delegation.
- Only valid when Kerberos was the negotiated auth or was explicitly set as
the authentication.
- Ignored when NTLM was the negotiated auth.
type: bool
vars:
- name: ansible_psrp_negotiate_delegate
negotiate_hostname_override:
description:
- Override the remote hostname when searching for the host in the Kerberos
lookup.
- This allows Ansible to connect over IP but authenticate with the remote
server using its DNS name.
- Only valid when Kerberos was the negotiated auth or was explicitly set as
the authentication.
- Ignored when NTLM was the negotiated auth.
type: str
vars:
- name: ansible_psrp_negotiate_hostname_override
negotiate_send_cbt:
description:
- Send the Channel Binding Token (CBT) structure when authenticating.
- CBT is used to provide extra protection against Man in the Middle C(MitM)
attacks by binding the outer transport channel to the auth channel.
- CBT is not used when using just C(HTTP), only C(HTTPS).
default: true
type: bool
vars:
- name: ansible_psrp_negotiate_send_cbt
negotiate_service:
description:
- Override the service part of the SPN used during Kerberos authentication.
- Only valid when Kerberos was the negotiated auth or was explicitly set as
the authentication.
- Ignored when NTLM was the negotiated auth.
default: WSMAN
type: str
vars:
- name: ansible_psrp_negotiate_service
# protocol options
operation_timeout:
description:
- Sets the WSMan timeout for each operation.
- This is measured in seconds.
- This should not exceed the value for O(connection_timeout).
type: int
vars:
- name: ansible_psrp_operation_timeout
default: 20
max_envelope_size:
description:
- Sets the maximum size of each WSMan message sent to the remote host.
- This is measured in bytes.
- Defaults to C(150KiB) for compatibility with older hosts.
type: int
vars:
- name: ansible_psrp_max_envelope_size
default: 153600
configuration_name:
description:
- The name of the PowerShell configuration endpoint to connect to.
type: str
vars:
- name: ansible_psrp_configuration_name
default: Microsoft.PowerShell
"""
import base64
import json
import logging
import os
import typing as t
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import ShellModule as PowerShellPlugin
from ansible.plugins.shell.powershell import _common_args
from ansible.utils.display import Display
from ansible.utils.hashing import sha1
HAS_PYPSRP = True
PYPSRP_IMP_ERR = None
try:
from pypsrp.complex_objects import GenericComplexObject, PSInvocationState, RunspacePoolState
from pypsrp.exceptions import AuthenticationError, WinRMError
from pypsrp.host import PSHost, PSHostUserInterface
from pypsrp.powershell import PowerShell, RunspacePool
from pypsrp.wsman import WSMan
from requests.exceptions import ConnectionError, ConnectTimeout
except ImportError as err:
HAS_PYPSRP = False
PYPSRP_IMP_ERR = err
display = Display()
class Connection(ConnectionBase):
transport = 'psrp'
module_implementation_preferences = ('.ps1', '.exe', '')
allow_executable = False
has_pipelining = True
# Satisfies mypy as this connection only ever runs with this plugin
_shell: PowerShellPlugin
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
self.always_pipeline_modules = True
self.has_native_async = True
self.runspace: RunspacePool | None = None
self.host: PSHost | None = None
self._last_pipeline: PowerShell | None = None
self._shell_type = 'powershell'
super(Connection, self).__init__(*args, **kwargs)
if not C.DEFAULT_DEBUG:
logging.getLogger('pypsrp').setLevel(logging.WARNING)
logging.getLogger('requests_credssp').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
def _connect(self) -> Connection:
if not HAS_PYPSRP:
raise AnsibleError("pypsrp or dependencies are not installed: %s"
% to_native(PYPSRP_IMP_ERR))
super(Connection, self)._connect()
self._build_kwargs()
display.vvv("ESTABLISH PSRP CONNECTION FOR USER: %s ON PORT %s TO %s" %
(self._psrp_user, self._psrp_port, self._psrp_host),
host=self._psrp_host)
if not self.runspace:
connection = WSMan(**self._psrp_conn_kwargs)
# create our pseudo host to capture the exit code and host output
host_ui = PSHostUserInterface()
self.host = PSHost(None, None, False, "Ansible PSRP Host", None,
host_ui, None)
self.runspace = RunspacePool(
connection, host=self.host,
configuration_name=self._psrp_configuration_name
)
display.vvvvv(
"PSRP OPEN RUNSPACE: auth=%s configuration=%s endpoint=%s" %
(self._psrp_auth, self._psrp_configuration_name,
connection.transport.endpoint), host=self._psrp_host
)
try:
self.runspace.open()
except AuthenticationError as e:
raise AnsibleConnectionFailure("failed to authenticate with "
"the server: %s" % to_native(e))
except WinRMError as e:
raise AnsibleConnectionFailure(
"psrp connection failure during runspace open: %s"
% to_native(e)
)
except (ConnectionError, ConnectTimeout) as e:
raise AnsibleConnectionFailure(
"Failed to connect to the host via PSRP: %s"
% to_native(e)
)
self._connected = True
self._last_pipeline = None
return self
def reset(self) -> None:
if not self._connected:
self.runspace = None
return
# Try our best to ensure the runspace is closed to free up server side resources
try:
self.close()
except Exception as e:
# There's a good chance the connection was already closed so just log the error and move on
display.debug("PSRP reset - failed to closed runspace: %s" % to_text(e))
display.vvvvv("PSRP: Reset Connection", host=self._psrp_host)
self.runspace = None
self._connect()
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
super(Connection, self).exec_command(cmd, in_data=in_data,
sudoable=sudoable)
pwsh_in_data: bytes | str | None = None
if cmd.startswith(" ".join(_common_args) + " -EncodedCommand"):
# This is a PowerShell script encoded by the shell plugin, we will
# decode the script and execute it in the runspace instead of
# starting a new interpreter to save on time
b_command = base64.b64decode(cmd.split(" ")[-1])
script = to_text(b_command, 'utf-16-le')
pwsh_in_data = to_text(in_data, errors="surrogate_or_strict", nonstring="passthru")
if pwsh_in_data and isinstance(pwsh_in_data, str) and pwsh_in_data.startswith("#!"):
# ANSIBALLZ wrapper, we need to get the interpreter and execute
# that as the script. Note this won't work as basic.py relies
# on packages not available on Windows; once fixed we can enable
# this path
interpreter = to_native(pwsh_in_data.splitlines()[0][2:])
# script = "$input | &'%s' -" % interpreter
raise AnsibleError("cannot run the interpreter '%s' on the psrp "
"connection plugin" % interpreter)
# call build_module_command to get the bootstrap wrapper text
bootstrap_wrapper = self._shell.build_module_command('', '', '')
if bootstrap_wrapper == cmd:
# Do not display to the user each invocation of the bootstrap wrapper
display.vvv("PSRP: EXEC (via pipeline wrapper)")
else:
display.vvv("PSRP: EXEC %s" % script, host=self._psrp_host)
else:
# In other cases we want to execute the cmd as the script. We add on the 'exit $LASTEXITCODE' to ensure the
# rc is propagated back to the connection plugin.
script = to_text(u"%s\nexit $LASTEXITCODE" % cmd)
pwsh_in_data = in_data
display.vvv(u"PSRP: EXEC %s" % script, host=self._psrp_host)
rc, stdout, stderr = self._exec_psrp_script(script, pwsh_in_data)
return rc, stdout, stderr
def put_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._psrp_host)
copy_script = """begin {
$ErrorActionPreference = "Stop"
$WarningPreference = "Continue"
$path = $MyInvocation.UnboundArguments[0]
$fd = [System.IO.File]::Create($path)
$algo = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
$bytes = @()
$bindingFlags = [System.Reflection.BindingFlags]'NonPublic, Instance'
Function Get-Property {
<#
.SYNOPSIS
Gets the private/internal property specified of the object passed in.
#>
Param (
[Parameter(Mandatory=$true, ValueFromPipeline=$true)]
[System.Object]
$Object,
[Parameter(Mandatory=$true, Position=1)]
[System.String]
$Name
)
$Object.GetType().GetProperty($Name, $bindingFlags).GetValue($Object, $null)
}
Function Set-Property {
<#
.SYNOPSIS
Sets the private/internal property specified on the object passed in.
#>
Param (
[Parameter(Mandatory=$true, ValueFromPipeline=$true)]
[System.Object]
$Object,
[Parameter(Mandatory=$true, Position=1)]
[System.String]
$Name,
[Parameter(Mandatory=$true, Position=2)]
[AllowNull()]
[System.Object]
$Value
)
$Object.GetType().GetProperty($Name, $bindingFlags).SetValue($Object, $Value, $null)
}
Function Get-Field {
<#
.SYNOPSIS
Gets the private/internal field specified of the object passed in.
#>
Param (
[Parameter(Mandatory=$true, ValueFromPipeline=$true)]
[System.Object]
$Object,
[Parameter(Mandatory=$true, Position=1)]
[System.String]
$Name
)
$Object.GetType().GetField($Name, $bindingFlags).GetValue($Object)
}
# MaximumAllowedMemory is required to be set so we can send input data that exceeds the limit on a PS
# Runspace. We use reflection to access/set this property as it is not accessible publicly. This is not ideal
# but works on all PowerShell versions I've tested with. We originally used WinRS to send the raw bytes to the
# host but this falls flat if someone is using a custom PS configuration name so this is a workaround. This
# isn't required for smaller files so if it fails we ignore the error and hope it wasn't needed.
# https://github.com/PowerShell/PowerShell/blob/c8e72d1e664b1ee04a14f226adf655cced24e5f0/src/System.Management.Automation/engine/serialization.cs#L325
try {
$Host | Get-Property 'ExternalHost' | `
Get-Field '_transportManager' | `
Get-Property 'Fragmentor' | `
Get-Property 'DeserializationContext' | `
Set-Property 'MaximumAllowedMemory' $null
} catch {}
}
process {
$bytes = [System.Convert]::FromBase64String($input)
$algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null
$fd.Write($bytes, 0, $bytes.Length)
}
end {
$fd.Close()
$algo.TransformFinalBlock($bytes, 0, 0) > $null
$hash = [System.BitConverter]::ToString($algo.Hash).Replace('-', '').ToLowerInvariant()
Write-Output -InputObject "{`"sha1`":`"$hash`"}"
}
"""
# Get the buffer size of each fragment to send, subtract 82 for the fragment, message, and other header info
# fields that PSRP adds. Scale by 3/4 to account for the base64 encoding of the raw bytes.
buffer_size = int((self.runspace.connection.max_payload_size - 82) / 4 * 3)
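# Worked example (assuming a max_payload_size of 153600 bytes):
# buffer_size = int((153600 - 82) / 4 * 3) = 115138 raw bytes per
# fragment; base64 then expands each fragment by a factor of roughly 4/3.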
sha1_hash = sha1()
b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
if not os.path.exists(b_in_path):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
def read_gen():
offset = 0
with open(b_in_path, 'rb') as src_fd:
for b_data in iter((lambda: src_fd.read(buffer_size)), b""):
data_len = len(b_data)
offset += data_len
sha1_hash.update(b_data)
# PSRP technically supports sending raw bytes but that method requires a larger CLIXML message.
# Sending base64 is still more efficient here.
display.vvvvv("PSRP PUT %s to %s (offset=%d, size=%d" % (in_path, out_path, offset, data_len),
host=self._psrp_host)
b64_data = base64.b64encode(b_data)
yield [to_text(b64_data)]
if offset == 0: # empty file
yield [""]
rc, stdout, stderr = self._exec_psrp_script(copy_script, read_gen(), arguments=[out_path])
if rc != 0:
raise AnsibleError(to_native(stderr))
put_output = json.loads(to_text(stdout))
local_sha1 = sha1_hash.hexdigest()
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned, stdout: '%s', stderr: '%s'"
% (to_native(stdout), to_native(stderr)))
if remote_sha1 != local_sha1:
raise AnsibleError("Remote sha1 hash %s does not match local hash %s"
% (to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path),
host=self._psrp_host)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
# because we are dealing with base64 data we need a raw-byte read size
# whose base64 encoding still fits within the maximum payload size
max_b64_size = int(self.runspace.connection.max_payload_size -
(self.runspace.connection.max_payload_size / 4 * 3))
buffer_size = max_b64_size - (max_b64_size % 1024)
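# Worked example (assuming a max_payload_size of 153600 bytes):
# max_b64_size = int(153600 - 153600 / 4 * 3) = 38400 and
# buffer_size = 38400 - (38400 % 1024) = 37888 bytes requested per read;
# note this is a conservative quarter of the payload size.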
# setup the file stream with read only mode
setup_script = """param([string]$Path)
$ErrorActionPreference = "Stop"
if (Test-Path -LiteralPath $path -PathType Leaf) {
$fs = New-Object -TypeName System.IO.FileStream -ArgumentList @(
$path,
[System.IO.FileMode]::Open,
[System.IO.FileAccess]::Read,
[System.IO.FileShare]::Read
)
} elseif (Test-Path -Path $path -PathType Container) {
Write-Output -InputObject "[DIR]"
} else {
Write-Error -Message "$path does not exist"
$host.SetShouldExit(1)
}"""
# read the file stream at the offset and return the b64 string
read_script = """param([int64]$Offset, [int]$BufferSize)
$ErrorActionPreference = "Stop"
$fs.Seek($Offset, [System.IO.SeekOrigin]::Begin) > $null
$buffer = New-Object -TypeName byte[] -ArgumentList $BufferSize
$read = $fs.Read($buffer, 0, $buffer.Length)
if ($read -gt 0) {
[System.Convert]::ToBase64String($buffer, 0, $read)
}"""
# need to run the setup script outside of the local scope so the
# file stream stays active between fetch operations
rc, stdout, stderr = self._exec_psrp_script(
setup_script,
use_local_scope=False,
arguments=[in_path],
)
if rc != 0:
raise AnsibleError("failed to setup file stream for fetch '%s': %s"
% (out_path, to_native(stderr)))
elif stdout.strip() == '[DIR]':
# to be consistent with other connection plugins, we assume the caller has created the target dir
return
b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
offset = 0
with open(b_out_path, 'wb') as out_file:
while True:
display.vvvvv("PSRP FETCH %s to %s (offset=%d" %
(in_path, out_path, offset), host=self._psrp_host)
rc, stdout, stderr = self._exec_psrp_script(
read_script,
arguments=[offset, buffer_size],
)
if rc != 0:
raise AnsibleError("failed to transfer file to '%s': %s"
% (out_path, to_native(stderr)))
data = base64.b64decode(stdout.strip())
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
rc, stdout, stderr = self._exec_psrp_script("$fs.Close()")
if rc != 0:
display.warning("failed to close remote file stream of file "
"'%s': %s" % (in_path, to_native(stderr)))
def close(self) -> None:
if self.runspace and self.runspace.state == RunspacePoolState.OPENED:
display.vvvvv("PSRP CLOSE RUNSPACE: %s" % (self.runspace.id),
host=self._psrp_host)
self.runspace.close()
self.runspace = None
self._connected = False
self._last_pipeline = None
def _build_kwargs(self) -> None:
self._psrp_host = self.get_option('remote_addr')
self._psrp_user = self.get_option('remote_user')
protocol = self.get_option('protocol')
port = self.get_option('port')
if protocol is None and port is None:
protocol = 'https'
port = 5986
elif protocol is None:
protocol = 'https' if int(port) != 5985 else 'http'
elif port is None:
port = 5986 if protocol == 'https' else 5985
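# Resulting defaults from the logic above:
#   protocol unset, port unset -> https / 5986
#   protocol unset, port 5985  -> http  / 5985
#   protocol unset, port other -> https / <port>
#   protocol https, port unset -> https / 5986
#   protocol http,  port unset -> http  / 5985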
self._psrp_port = int(port)
self._psrp_auth = self.get_option('auth')
self._psrp_configuration_name = self.get_option('configuration_name')
# cert validation can either be a bool or a path to the cert
cert_validation = self.get_option('cert_validation')
cert_trust_path = self.get_option('ca_cert')
if cert_validation == 'ignore':
psrp_cert_validation = False
elif cert_trust_path is not None:
psrp_cert_validation = cert_trust_path
else:
psrp_cert_validation = True
self._psrp_conn_kwargs = dict(
server=self._psrp_host,
port=self._psrp_port,
username=self._psrp_user,
password=self.get_option('remote_password'),
ssl=protocol == 'https',
path=self.get_option('path'),
auth=self._psrp_auth,
cert_validation=psrp_cert_validation,
connection_timeout=self.get_option('connection_timeout'),
encryption=self.get_option('message_encryption'),
proxy=self.get_option('proxy'),
no_proxy=boolean(self.get_option('ignore_proxy')),
max_envelope_size=self.get_option('max_envelope_size'),
operation_timeout=self.get_option('operation_timeout'),
read_timeout=self.get_option('read_timeout'),
reconnection_retries=self.get_option('reconnection_retries'),
reconnection_backoff=float(self.get_option('reconnection_backoff')),
certificate_key_pem=self.get_option('certificate_key_pem'),
certificate_pem=self.get_option('certificate_pem'),
credssp_auth_mechanism=self.get_option('credssp_auth_mechanism'),
credssp_disable_tlsv1_2=self.get_option('credssp_disable_tlsv1_2'),
credssp_minimum_version=self.get_option('credssp_minimum_version'),
negotiate_send_cbt=self.get_option('negotiate_send_cbt'),
negotiate_delegate=self.get_option('negotiate_delegate'),
negotiate_hostname_override=self.get_option('negotiate_hostname_override'),
negotiate_service=self.get_option('negotiate_service'),
)
def _exec_psrp_script(
self,
script: str,
input_data: bytes | str | t.Iterable | None = None,
use_local_scope: bool = True,
arguments: t.Iterable[t.Any] | None = None,
) -> tuple[int, bytes, bytes]:
# Check if there's a command on the current pipeline that still needs to be closed.
if self._last_pipeline:
# Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we
# can call stop without any issues.
self._last_pipeline.state = PSInvocationState.RUNNING
self._last_pipeline.stop()
self._last_pipeline = None
ps = PowerShell(self.runspace)
ps.add_script(script, use_local_scope=use_local_scope)
if arguments:
for arg in arguments:
ps.add_argument(arg)
ps.invoke(input=input_data)
rc, stdout, stderr = self._parse_pipeline_result(ps)
# We should really call .stop() on all pipelines that are run to decrement the concurrent command counter on
# PSSession but that involves another round trip and is done when the runspace is closed. We instead store the
# last pipeline which is closed if another command is run on the runspace.
self._last_pipeline = ps
return rc, stdout, stderr
def _parse_pipeline_result(self, pipeline: PowerShell) -> tuple[int, bytes, bytes]:
"""
PSRP doesn't model output the same way as other protocols do.
We need some extra logic to convert the pipeline streams and host
output into the format that Ansible understands.
:param pipeline: The finished PowerShell pipeline that invoked our
commands
:return: rc, stdout, stderr based on the pipeline output
"""
# we try to get the rc from our host implementation; this is set if
# exit or $host.SetShouldExit() is called in our pipeline. If not, we
# set it to 0 if the pipeline had no errors and 1 if it did
rc = self.host.rc or (1 if pipeline.had_errors else 0)
# TODO: figure out a better way of merging this with the host output
stdout_list = []
for output in pipeline.output:
# Not all pipeline outputs are a string or contain a __str__ value,
# we will create our own output based on the properties of the
# complex object if that is the case.
if isinstance(output, GenericComplexObject) and output.to_string is None:
obj_lines = output.property_sets
for key, value in output.adapted_properties.items():
obj_lines.append(u"%s: %s" % (key, value))
for key, value in output.extended_properties.items():
obj_lines.append(u"%s: %s" % (key, value))
output_msg = u"\n".join(obj_lines)
else:
output_msg = to_text(output, nonstring='simplerepr')
stdout_list.append(output_msg)
if len(self.host.ui.stdout) > 0:
stdout_list += self.host.ui.stdout
stdout = u"\r\n".join(stdout_list)
stderr_list = []
for error in pipeline.streams.error:
# the error record is not as fully fleshed out as what we usually get
# in PS, so we manually create it here
command_name = "%s : " % error.command_name if error.command_name else ''
position = "%s\r\n" % error.invocation_position_message if error.invocation_position_message else ''
error_msg = "%s%s\r\n%s" \
" + CategoryInfo : %s\r\n" \
" + FullyQualifiedErrorId : %s" \
% (command_name, str(error), position,
error.message, error.fq_error)
stacktrace = error.script_stacktrace
if display.verbosity >= 3 and stacktrace is not None:
error_msg += "\r\nStackTrace:\r\n%s" % stacktrace
stderr_list.append(error_msg)
if len(self.host.ui.stderr) > 0:
stderr_list += self.host.ui.stderr
stderr = u"\r\n".join([to_text(o) for o in stderr_list])
display.vvvvv("PSRP RC: %d" % rc, host=self._psrp_host)
display.vvvvv("PSRP STDOUT: %s" % stdout, host=self._psrp_host)
display.vvvvv("PSRP STDERR: %s" % stderr, host=self._psrp_host)
# reset the host output back to defaults; needed when running
# multiple pipelines on the same RunspacePool
self.host.rc = 0
self.host.ui.stdout = []
self.host.ui.stderr = []
return rc, to_bytes(stdout, encoding='utf-8'), to_bytes(stderr, encoding='utf-8')
| 33,543 | Python | .py | 776 | 34.460052 | 154 | 0.633412 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,409 | winrm.py | ansible_ansible/lib/ansible/plugins/connection/winrm.py |
# (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
author: Ansible Core Team
name: winrm
short_description: Run tasks over Microsoft's WinRM
description:
- Run commands or put/fetch on a target via WinRM
- This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here.
They should take the form of variables declared with the following pattern C(ansible_winrm_<option>).
version_added: "2.0"
extends_documentation_fragment:
- connection_pipelining
requirements:
- pywinrm (python library)
options:
# figure out more elegant 'delegation'
remote_addr:
description:
- Address of the windows machine
default: inventory_hostname
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_winrm_host
type: str
remote_user:
description:
- The user to log in as to the Windows machine
vars:
- name: ansible_user
- name: ansible_winrm_user
keyword:
- name: remote_user
type: str
remote_password:
description: Authentication password for the O(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_winrm_pass
- name: ansible_winrm_password
type: str
aliases:
- password # Needed for --ask-pass to come through on delegation
port:
description:
- The port for WinRM to connect on the remote target.
- The default is the HTTPS port (5986); if using HTTP it should be 5985.
vars:
- name: ansible_port
- name: ansible_winrm_port
default: 5986
keyword:
- name: port
type: integer
scheme:
description:
- URI scheme to use
- If not set, defaults to V(https), or V(http) if O(port) is
V(5985).
choices: [http, https]
vars:
- name: ansible_winrm_scheme
type: str
path:
description: URI path to connect to
default: '/wsman'
vars:
- name: ansible_winrm_path
type: str
transport:
description:
- List of winrm transports to attempt to use (ssl, plaintext, kerberos, etc)
- If None (the default) the plugin will try to automatically guess the correct list
- The choices available depend on your version of pywinrm
type: list
elements: string
vars:
- name: ansible_winrm_transport
kerberos_command:
description: kerberos command to use to request an authentication ticket
default: kinit
vars:
- name: ansible_winrm_kinit_cmd
type: str
kinit_args:
description:
- Extra arguments to pass to C(kinit) when getting the Kerberos authentication ticket.
- By default no extra arguments are passed into C(kinit) unless I(ansible_winrm_kerberos_delegation) is also
set. In that case C(-f) is added to the C(kinit) args so a forwardable ticket is retrieved.
- If set, the args will overwrite any existing defaults for C(kinit), including C(-f) for a delegated ticket.
type: str
vars:
- name: ansible_winrm_kinit_args
version_added: '2.11'
kinit_env_vars:
description:
- A list of environment variables to pass through to C(kinit) when getting the Kerberos authentication ticket.
- By default no environment variables are passed through and C(kinit) is run with a blank slate.
- The environment variable C(KRB5CCNAME) cannot be specified here as it's used to store the temp Kerberos
ticket used by WinRM.
type: list
elements: str
default: []
ini:
- section: winrm
key: kinit_env_vars
vars:
- name: ansible_winrm_kinit_env_vars
version_added: '2.12'
kerberos_mode:
description:
- kerberos usage mode.
- The V(managed) option means Ansible will obtain the Kerberos ticket.
- The V(manual) option means a ticket must already have been obtained by the user.
- If having issues with Ansible freezing when trying to obtain the
Kerberos ticket, you can either set this to V(manual) and obtain
it outside Ansible or install C(pexpect) through pip and try
again.
choices: [managed, manual]
vars:
- name: ansible_winrm_kinit_mode
type: str
connection_timeout:
description:
- Despite its name, sets both the 'operation' and 'read' timeout settings for the WinRM
connection.
- The operation timeout belongs to the WS-Man layer and runs on the winRM-service on the
managed windows host.
- The read timeout belongs to the underlying python Request call (http-layer) and runs
on the ansible controller.
- The operation timeout sets the WS-Man 'Operation timeout' that runs on the managed
windows host. The operation timeout specifies how long a command will run on the
winRM-service before it sends the message 'WinRMOperationTimeoutError' back to the
client. The client (silently) ignores this message and starts a new instance of the
operation timeout, waiting for the command to finish (long running commands).
- The read timeout sets the client HTTP-request timeout and specifies how long the
client (ansible controller) will wait for data from the server to come back over
the HTTP-connection (timeout for waiting for in-between messages from the server).
When this timer expires, an exception will be thrown and the ansible connection
will be terminated with the error message 'Read timed out'
- To avoid the above exception being thrown, the read timeout will be set to 10
seconds higher than the WS-Man operation timeout, thus making the connection more
robust on networks with long latency and/or many hops between server and client.
- Setting the difference between the operation and the read timeout to 10 seconds
aligns it to the defaults used in the winrm-module and the PSRP-module which also
uses 10 seconds (30 seconds for read timeout and 20 seconds for operation timeout)
- Corresponds to the C(operation_timeout_sec) and
C(read_timeout_sec) args in pywinrm so avoid setting these vars
with this one.
- The default value is whatever is set in the installed version of
pywinrm.
vars:
- name: ansible_winrm_connection_timeout
type: int
"""
import base64
import logging
import os
import re
import traceback
import json
import tempfile
import shlex
import subprocess
import time
import typing as t
import xml.etree.ElementTree as ET
from inspect import getfullargspec
from urllib.parse import urlunsplit
HAVE_KERBEROS = False
try:
import kerberos # pylint: disable=unused-import
HAVE_KERBEROS = True
except ImportError:
pass
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.json_utils import _filter_non_json_lines
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import _parse_clixml
from ansible.plugins.shell.powershell import ShellBase as PowerShellBase
from ansible.utils.hashing import secure_hash
from ansible.utils.display import Display
try:
import winrm
from winrm.exceptions import WinRMError, WinRMOperationTimeoutError, WinRMTransportError
from winrm.protocol import Protocol
import requests.exceptions
HAS_WINRM = True
WINRM_IMPORT_ERR = None
except ImportError as e:
HAS_WINRM = False
WINRM_IMPORT_ERR = e
try:
from winrm.exceptions import WSManFaultError
except ImportError:
# This was added in pywinrm 0.5.0, we just use our no-op exception for
# older versions which won't be able to handle this scenario.
class WSManFaultError(Exception): # type: ignore[no-redef]
pass
try:
import xmltodict
HAS_XMLTODICT = True
XMLTODICT_IMPORT_ERR = None
except ImportError as e:
HAS_XMLTODICT = False
XMLTODICT_IMPORT_ERR = e
HAS_PEXPECT = False
try:
import pexpect
# echo was added in pexpect 3.3+ which is newer than the RHEL package
# we can only use pexpect for kerb auth if echo is a valid kwarg
# https://github.com/ansible/ansible/issues/43462
if hasattr(pexpect, 'spawn'):
argspec = getfullargspec(pexpect.spawn.__init__)
if 'echo' in argspec.args:
HAS_PEXPECT = True
except ImportError as e:
pass
# used to try and parse the hostname and detect if IPv6 is being used
try:
import ipaddress
HAS_IPADDRESS = True
except ImportError:
HAS_IPADDRESS = False
display = Display()
class Connection(ConnectionBase):
"""WinRM connections over HTTP/HTTPS."""
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
allow_executable = False
has_pipelining = True
allow_extras = True
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol: winrm.Protocol | None = None
self.shell_id: str | None = None
self.delegate = None
self._shell: PowerShellBase
self._shell_type = 'powershell'
super(Connection, self).__init__(*args, **kwargs)
if not C.DEFAULT_DEBUG:
logging.getLogger('requests_credssp').setLevel(logging.INFO)
logging.getLogger('requests_kerberos').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
def _build_winrm_kwargs(self) -> None:
# this used to be in set_options; as win_reboot needs to be able to
# override the conn timeout, we need to be able to build the args
# after setting individual options. This is called by _connect before
# starting the WinRM connection
self._winrm_host = self.get_option('remote_addr')
self._winrm_user = self.get_option('remote_user')
self._winrm_pass = self.get_option('remote_password')
self._winrm_port = self.get_option('port')
self._winrm_scheme = self.get_option('scheme')
# old behaviour: scheme should default to http if not set and the port
# is 5985, otherwise https
if self._winrm_scheme is None:
self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https'
self._winrm_path = self.get_option('path')
self._kinit_cmd = self.get_option('kerberos_command')
self._winrm_transport = self.get_option('transport')
self._winrm_connection_timeout = self.get_option('connection_timeout')
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
# calculate transport if needed
if self._winrm_transport is None or self._winrm_transport[0] is None:
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext']
if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
self._winrm_transport = ['kerberos'] + transport_selector
else:
self._winrm_transport = transport_selector
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' %
to_native(list(unsupported_transports), nonstring='simplerepr'))
# if kerberos is among our transports and there's a password specified, we're managing the tickets
kinit_mode = self.get_option('kerberos_mode')
if kinit_mode is None:
# HACK: ideally, remove multi-transport stuff
self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "")
elif kinit_mode == "managed":
self._kerb_managed = True
elif kinit_mode == "manual":
self._kerb_managed = False
# arg names we're going to pass directly
internal_kwarg_mask = {'self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'}
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = getfullargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = {v.replace('ansible_winrm_', '') for v in self.get_option('_extras')}
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching extras, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg]
# Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection
# auth itself with a private CCACHE.
def _kerb_auth(self, principal: str, password: str) -> None:
if password is None:
password = ""
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
os.environ["KRB5CCNAME"] = krb5ccname
krb5env = dict(PATH=os.environ["PATH"], KRB5CCNAME=krb5ccname)
# Add any explicit environment vars into the krb5env block
kinit_env_vars = self.get_option('kinit_env_vars')
for var in kinit_env_vars:
if var not in krb5env and var in os.environ:
krb5env[var] = os.environ[var]
# Stores various flags to call with kinit, these could be explicit args set by 'ansible_winrm_kinit_args' OR
# '-f' if kerberos delegation is requested (ansible_winrm_kerberos_delegation).
kinit_cmdline = [self._kinit_cmd]
kinit_args = self.get_option('kinit_args')
if kinit_args:
kinit_args = [to_text(a) for a in shlex.split(kinit_args) if a.strip()]
kinit_cmdline.extend(kinit_args)
elif boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
kinit_cmdline.append('-f')
kinit_cmdline.append(principal)
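# e.g. the resulting cmdline is ['kinit', 'user@EXAMPLE.COM'] by default,
# or ['kinit', '-f', 'user@EXAMPLE.COM'] when kerberos delegation is
# requested (the principal shown is a placeholder)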
# pexpect runs the process in its own pty so it can correctly send
# the password as input even on macOS, which blocks subprocess from
# doing so. Unfortunately it is not available in the built-in Python,
# so we can only use it if someone has installed it
if HAS_PEXPECT:
proc_mechanism = "pexpect"
command = kinit_cmdline.pop(0)
password = to_text(password, encoding='utf-8',
errors='surrogate_or_strict')
display.vvvv("calling kinit with pexpect for principal %s"
% principal)
try:
child = pexpect.spawn(command, kinit_cmdline, timeout=60,
env=krb5env, echo=False)
except pexpect.ExceptionPexpect as err:
err_msg = "Kerberos auth failure when calling kinit cmd " \
"'%s': %s" % (command, to_native(err))
raise AnsibleConnectionFailure(err_msg)
try:
child.expect(".*:")
child.sendline(password)
except OSError as err:
# child exited before the password was sent; Ansible will raise an
# error based on the rc below, so just display the error here
display.vvvv("kinit with pexpect raised OSError: %s"
% to_native(err))
# technically this is the stdout + stderr but to match the
# subprocess error checking behaviour, we will call it stderr
stderr = child.read()
child.wait()
rc = child.exitstatus
else:
proc_mechanism = "subprocess"
b_password = to_bytes(password, encoding='utf-8',
errors='surrogate_or_strict')
display.vvvv("calling kinit with subprocess for principal %s"
% principal)
try:
p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=krb5env)
except OSError as err:
err_msg = "Kerberos auth failure when calling kinit cmd " \
"'%s': %s" % (self._kinit_cmd, to_native(err))
raise AnsibleConnectionFailure(err_msg)
stdout, stderr = p.communicate(b_password + b'\n')
rc = p.returncode
if rc != 0:
# one last attempt at making sure the password does not exist
# in the output
exp_msg = to_native(stderr.strip())
exp_msg = exp_msg.replace(to_native(password), "<redacted>")
err_msg = "Kerberos auth failure for principal %s with %s: %s" \
% (principal, proc_mechanism, exp_msg)
raise AnsibleConnectionFailure(err_msg)
display.vvvvv("kinit succeeded for principal %s" % principal)
def _winrm_connect(self) -> winrm.Protocol:
"""
Establish a WinRM connection over HTTP/HTTPS.
"""
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
winrm_host = self._winrm_host
if HAS_IPADDRESS:
display.debug("checking if winrm_host %s is an IPv6 address" % winrm_host)
try:
ipaddress.IPv6Address(winrm_host)
except ipaddress.AddressValueError:
pass
else:
winrm_host = "[%s]" % winrm_host
netloc = '%s:%d' % (winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
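# e.g. endpoint becomes 'https://win-host.example.com:5986/wsman', or
# 'https://[2001:db8::1]:5986/wsman' when an IPv6 address was detected
# above (host names shown are placeholders)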
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos':
if not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
if self._kerb_managed:
self._kerb_auth(self._winrm_user, self._winrm_pass)
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
winrm_kwargs = self._winrm_kwargs.copy()
if self._winrm_connection_timeout:
winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 10
protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)
# open the shell from connect so we know we're able to talk to the server
if not self.shell_id:
self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
return protocol
except Exception as e:
err_msg = to_text(e).strip()
if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the specified credentials were rejected by the server'
elif code == 411:
return protocol
errors.append(u'%s: %s' % (transport, err_msg))
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
if errors:
raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
else:
raise AnsibleError('No transport found for WinRM connection')
def _winrm_write_stdin(self, command_id: str, stdin_iterator: t.Iterable[tuple[bytes, bool]]) -> None:
for (data, is_last) in stdin_iterator:
for attempt in range(1, 4):
try:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except WinRMOperationTimeoutError:
# A WSMan OperationTimeout can be received for a Send
# operation when the server is under severe load. On manual
# testing the input is still processed and it's safe to
# continue. As the calling method still tries to wait for the proc
# to end, treating this as a warning shouldn't hurt even if the send
# actually failed.
display.warning(
"WSMan OperationTimeout during send input, attempting to continue. "
"If this continues to occur, try increasing the connection_timeout "
"value for this host."
)
if not is_last:
time.sleep(5)
except WinRMError as e:
# Error 170 == ERROR_BUSY. This could be the result of a
# timed out Send from above still being processed on the
# server. Add a 5 second delay and try up to 3 times before
# fully giving up.
# pywinrm does not expose the internal WSMan fault details
# through an actual object but embeds it as a repr.
if attempt == 3 or "'wsmanfault_code': '170'" not in str(e):
raise
display.warning(f"WSMan send failed on attempt {attempt} as the command is busy, trying to send data again")
time.sleep(5)
continue
break
def _winrm_send_input(self, protocol: winrm.Protocol, shell_id: str, command_id: str, stdin: bytes, eof: bool = False) -> None:
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
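# The unparsed envelope body looks roughly like (abridged):
#   <rsp:Send>
#     <rsp:Stream Name="stdin" CommandId="..." End="true">BASE64DATA</rsp:Stream>
#   </rsp:Send>
# with End="true" only present on the final fragment.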
def _winrm_get_raw_command_output(
self,
protocol: winrm.Protocol,
shell_id: str,
command_id: str,
) -> tuple[bytes, bytes, int, bool]:
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Receive', {})\
.setdefault('rsp:DesiredStream', {})
stream['@CommandId'] = command_id
stream['#text'] = 'stdout stderr'
res = protocol.send_message(xmltodict.unparse(rq))
root = ET.fromstring(res)
stream_nodes = [
node for node in root.findall('.//*')
if node.tag.endswith('Stream')]
stdout = []
stderr = []
return_code = -1
for stream_node in stream_nodes:
if not stream_node.text:
continue
if stream_node.attrib['Name'] == 'stdout':
stdout.append(base64.b64decode(stream_node.text.encode('ascii')))
elif stream_node.attrib['Name'] == 'stderr':
stderr.append(base64.b64decode(stream_node.text.encode('ascii')))
command_done = len([
node for node in root.findall('.//*')
if node.get('State', '').endswith('CommandState/Done')]) == 1
if command_done:
return_code = int(
next(node for node in root.findall('.//*')
if node.tag.endswith('ExitCode')).text)
return b"".join(stdout), b"".join(stderr), return_code, command_done
def _winrm_get_command_output(
self,
protocol: winrm.Protocol,
shell_id: str,
command_id: str,
try_once: bool = False,
) -> tuple[bytes, bytes, int]:
stdout_buffer, stderr_buffer = [], []
command_done = False
return_code = -1
while not command_done:
try:
stdout, stderr, return_code, command_done = \
self._winrm_get_raw_command_output(protocol, shell_id, command_id)
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
# If we were able to get output at least once then we should be
# able to get the rest.
try_once = False
except WinRMOperationTimeoutError:
# This is an expected error when waiting for a long-running process,
# just silently retry if we haven't been set to do one attempt.
if try_once:
break
continue
return b''.join(stdout_buffer), b''.join(stderr_buffer), return_code
def _winrm_exec(
self,
command: str,
args: t.Iterable[bytes] = (),
from_exec: bool = False,
stdin_iterator: t.Iterable[tuple[bytes, bool]] = None,
) -> tuple[int, bytes, bytes]:
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self._winrm_run_command(
to_bytes(command),
tuple(map(to_bytes, args)),
console_mode_stdin=(stdin_iterator is None),
)
try:
if stdin_iterator:
self._winrm_write_stdin(command_id, stdin_iterator)
except Exception as ex:
display.warning("ERROR DURING WINRM SEND INPUT - attempting to recover: %s %s"
% (type(ex).__name__, to_text(ex)))
display.debug(traceback.format_exc())
stdin_push_failed = True
# Even on a failure above we try at least once to get the output
            # in case the stdin was actually written and it ran normally.
b_stdout, b_stderr, rc = self._winrm_get_command_output(
self.protocol,
self.shell_id,
command_id,
try_once=stdin_push_failed,
)
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
if from_exec:
display.vvvvv('WINRM RESULT <Response code %d, out %r, err %r>' % (rc, stdout, stderr), host=self._winrm_host)
display.vvvvvv('WINRM RC %d' % rc, host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % stdout, host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % stderr, host=self._winrm_host)
# This is done after logging so we can still see the raw stderr for
# debugging purposes.
if b_stderr.startswith(b"#< CLIXML"):
b_stderr = _parse_clixml(b_stderr)
                stderr = to_text(b_stderr)
if stdin_push_failed:
# There are cases where the stdin input failed but the WinRM service still processed it. We attempt to
# see if stdout contains a valid json return value so we can ignore this error
try:
filtered_output, dummy = _filter_non_json_lines(stdout)
json.loads(filtered_output)
except ValueError:
# stdout does not contain a return response, stdin input was a fatal error
                    raise AnsibleError(f'winrm send_input failed; \nstdout: {stdout}\nstderr: {stderr}')
return rc, b_stdout, b_stderr
except requests.exceptions.Timeout as exc:
raise AnsibleConnectionFailure('winrm connection error: %s' % to_native(exc))
finally:
if command_id:
# Due to a bug in how pywinrm works with message encryption we
# ignore a 400 error which can occur when a task timeout is
# set and the code tries to clean up the command. This happens
# as the cleanup msg is sent over a new socket but still uses
# the already encrypted payload bound to the other socket
# causing the server to reply with 400 Bad Request.
try:
self.protocol.cleanup_command(self.shell_id, command_id)
except WinRMTransportError as e:
if e.code != 400:
raise
display.warning("Failed to cleanup running WinRM command, resources might still be in use on the target server")
def _winrm_run_command(
self,
command: bytes,
args: tuple[bytes, ...],
console_mode_stdin: bool = False,
) -> str:
"""Starts a command with handling when the WSMan quota is exceeded."""
try:
return self.protocol.run_command(
self.shell_id,
command,
args,
console_mode_stdin=console_mode_stdin,
)
except WSManFaultError as fault_error:
if fault_error.wmierror_code != 0x803381A6:
raise
# 0x803381A6 == ERROR_WSMAN_QUOTA_MAX_OPERATIONS
# WinRS does not decrement the operation count for commands,
# only way to avoid this is to re-create the shell. This is
# important for action plugins that might be running multiple
# processes in the same connection.
display.vvvvv("Shell operation quota exceeded, re-creating shell", host=self._winrm_host)
self.close()
self._connect()
return self.protocol.run_command(
self.shell_id,
command,
args,
console_mode_stdin=console_mode_stdin,
)
def _connect(self) -> Connection:
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR))
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR))
super(Connection, self)._connect()
if not self.protocol:
self._build_winrm_kwargs() # build the kwargs from the options set
self.protocol = self._winrm_connect()
self._connected = True
return self
def reset(self) -> None:
if not self._connected:
return
self.protocol = None
self.shell_id = None
self._connect()
def _wrapper_payload_stream(self, payload: bytes, buffer_size: int = 200000) -> t.Iterable[tuple[bytes, bool]]:
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
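    # A standalone sketch (illustrative, not part of the plugin) of the
    # chunking contract above: every slice except possibly the last is exactly
    # buffer_size bytes, and only the final tuple carries is_last=True.
    #
    #   def _chunked(payload: bytes, buffer_size: int = 200000):
    #       for i in range(0, len(payload), buffer_size):
    #           yield payload[i:i + buffer_size], i + buffer_size >= len(payload)
    #
    #   chunks = list(_chunked(b"a" * 500000))
    #   assert [len(c) for c, eof in chunks] == [200000, 200000, 100000]
    #   assert [eof for c, eof in chunks] == [False, False, True]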
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
return self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path: str, out_path: str, buffer_size: int = 250000) -> t.Iterable[tuple[bytes, bool]]:
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), b''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + b'\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield b"", True
def put_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
script_template = u"""
begin {{
$path = '{0}'
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
$fd = [System.IO.File]::Create($path)
$sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
$bytes = @() #initialize for empty file case
}}
process {{
$bytes = [System.Convert]::FromBase64String($input)
$sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
$fd.Write($bytes, 0, $bytes.Length)
}}
end {{
$sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
$hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
$fd.Close()
Write-Output "{{""sha1"":""$hash""}}"
}}
"""
script = script_template.format(self._shell._escape(out_path))
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
status_code, b_stdout, b_stderr = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
if status_code != 0:
raise AnsibleError(stderr)
try:
put_output = json.loads(stdout)
except ValueError:
# stdout does not contain a valid response
            raise AnsibleError('winrm put_file failed; \nstdout: %s\nstderr: %s' % (stdout, stderr))
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned")
local_sha1 = secure_hash(in_path)
        if remote_sha1 != local_sha1:
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
# consistent with other connection plugins, we assume the caller has created the target dir
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
out_file = None
try:
offset = 0
while True:
try:
script = """
$path = '%(path)s'
If (Test-Path -LiteralPath $path -PathType Leaf)
{
$buffer_size = %(buffer_size)d
$offset = %(offset)d
$stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
$stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
$buffer = New-Object -TypeName byte[] $buffer_size
$bytes_read = $stream.Read($buffer, 0, $buffer_size)
if ($bytes_read -gt 0) {
$bytes = $buffer[0..($bytes_read - 1)]
[System.Convert]::ToBase64String($bytes)
}
$stream.Close() > $null
}
ElseIf (Test-Path -LiteralPath $path -PathType Container)
{
Write-Host "[DIR]";
}
Else
{
Write-Error "$path does not exist";
Exit 1;
}
""" % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
status_code, b_stdout, b_stderr = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
if status_code != 0:
raise IOError(stderr)
if stdout.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(stdout.strip())
if data is None:
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
break
out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path))
finally:
if out_file:
out_file.close()
def close(self) -> None:
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
| 42,188 | Python | .py | 827 | 38.407497 | 156 | 0.591059 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,410 | sh.py | ansible_ansible/lib/ansible/plugins/shell/sh.py |
# Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: sh
short_description: "POSIX shell (/bin/sh)"
version_added: historical
description:
- This shell plugin is the one you want to use on most Unix systems, it is the most compatible and widely installed shell.
extends_documentation_fragment:
- shell_common
"""
from ansible.plugins.shell import ShellBase
class ShellModule(ShellBase):
    # Common shell filenames that this plugin handles.
    # Note: sh is the default shell plugin so this plugin may also be selected
    # if the filename is not listed in any Shell plugin.
    # This code needs to be SH-compliant. BASH-isms will not work if /bin/sh points to a non-BASH shell.
COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh'))
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'sh'
# commonly used
ECHO = 'echo'
COMMAND_SEP = ';'
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\n'
_SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
_SHELL_AND = '&&'
_SHELL_OR = '||'
_SHELL_SUB_LEFT = '"`'
_SHELL_SUB_RIGHT = '`"'
_SHELL_GROUP_LEFT = '('
_SHELL_GROUP_RIGHT = ')'
def checksum(self, path, python_interp):
# In the following test, each condition is a check and logical
# comparison (|| or &&) that sets the rc value. Every check is run so
# the last check in the series to fail will be the rc that is returned.
#
# If a check fails we error before invoking the hash functions because
# hash functions may successfully take the hash of a directory on BSDs
# (UFS filesystem?) which is not what the rest of the ansible code expects
#
# If all of the available hashing methods fail we fail with an rc of 0.
# This logic is added to the end of the cmd at the bottom of this function.
# Return codes:
# checksum: success!
# 0: Unknown error
# 1: Remote file does not exist
# 2: No read permissions on the file
# 3: File is a directory
# 4: No python interpreter
# Quoting gets complex here. We're writing a python string that's
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = self.quote(path)
test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) # NOQA
csums = [
u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python > 2.4 (including python3)
u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python == 2.4
]
cmd = (" %s " % self._SHELL_OR).join(csums)
cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
return cmd
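    # Hypothetical caller-side helper (not part of this plugin) showing how the
    # single line produced by the command above is interpreted: the first token
    # is either a 40-character SHA-1 hex digest or one of the rc values listed
    # in the comment at the top of checksum().
    #
    #   def parse_checksum_line(line):
    #       token = line.strip().split()[0]
    #       return (token, 0) if len(token) == 40 else (None, int(token))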
| 3,884 | Python | .py | 64 | 54.4375 | 396 | 0.644958 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,411 | __init__.py | ansible_ansible/lib/ansible/plugins/shell/__init__.py |
# (c) 2016 RedHat
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import os.path
import re
import secrets
import shlex
import time
from collections.abc import Mapping, Sequence
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import text_type, string_types
from ansible.plugins import AnsiblePlugin
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellBase(AnsiblePlugin):
def __init__(self):
super(ShellBase, self).__init__()
# Not used but here for backwards compatibility.
        # ansible.posix.fish references (but does not actually use) this value.
# https://github.com/ansible-collections/ansible.posix/blob/f41f08e9e3d3129e709e122540b5ae6bc19932be/plugins/shell/fish.py#L38-L39
self.env = {}
self.tmpdir = None
self.executable = None
def _normalize_system_tmpdirs(self):
# Normalize the tmp directory strings. We don't use expanduser/expandvars because those
# can vary between remote user and become user. Therefore the safest practice will be for
# this to always be specified as full paths)
normalized_paths = [d.rstrip('/') for d in self.get_option('system_tmpdirs')]
# Make sure all system_tmpdirs are absolute otherwise they'd be relative to the login dir
# which is almost certainly going to fail in a cornercase.
if not all(os.path.isabs(d) for d in normalized_paths):
raise AnsibleError('The configured system_tmpdirs contains a relative path: {0}. All'
' system_tmpdirs must be absolute'.format(to_native(normalized_paths)))
self.set_option('system_tmpdirs', normalized_paths)
def set_options(self, task_keys=None, var_options=None, direct=None):
super(ShellBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
# We can remove the try: except in the future when we make ShellBase a proper subset of
# *all* shells. Right now powershell and third party shells which do not use the
# shell_common documentation fragment (and so do not have system_tmpdirs) will fail
try:
self._normalize_system_tmpdirs()
except KeyError:
pass
@staticmethod
def _generate_temp_dir_name():
return 'ansible-tmp-%s-%s-%s' % (time.time(), os.getpid(), secrets.randbelow(2**48))
def env_prefix(self, **kwargs):
return ' '.join(['%s=%s' % (k, self.quote(text_type(v))) for k, v in kwargs.items()])
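    # e.g. env_prefix(LANG='C', FOO='bar baz') -> "LANG='C' FOO='bar baz'";
    # each value is shell-quoted and ordering follows the kwargs.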
def join_path(self, *args):
return os.path.join(*args)
# some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say
def get_remote_filename(self, pathname):
base_name = os.path.basename(pathname.strip())
return base_name.strip()
def path_has_trailing_slash(self, path):
return path.endswith('/')
def chmod(self, paths, mode):
cmd = ['chmod', mode]
cmd.extend(paths)
return self.join(cmd)
def chown(self, paths, user):
cmd = ['chown', user]
cmd.extend(paths)
return self.join(cmd)
def chgrp(self, paths, group):
cmd = ['chgrp', group]
cmd.extend(paths)
return self.join(cmd)
def set_user_facl(self, paths, user, mode):
"""Only sets acls for users as that's really all we need"""
cmd = ['setfacl', '-m', 'u:%s:%s' % (user, mode)]
cmd.extend(paths)
return self.join(cmd)
def remove(self, path, recurse=False):
path = self.quote(path)
cmd = 'rm -f '
if recurse:
cmd += '-r '
return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
def exists(self, path):
cmd = ['test', '-e', self.quote(path)]
return ' '.join(cmd)
def mkdtemp(self, basefile=None, system=False, mode=0o700, tmpdir=None):
if not basefile:
basefile = self.__class__._generate_temp_dir_name()
# When system is specified we have to create this in a directory where
# other users can read and access the tmp directory.
# This is because we use system to create tmp dirs for unprivileged users who are
# sudo'ing to a second unprivileged user.
# The 'system_tmpdirs' setting defines directories we can use for this purpose
# the default are, /tmp and /var/tmp.
# So we only allow one of those locations if system=True, using the
# passed in tmpdir if it is valid or the first one from the setting if not.
if system:
if tmpdir:
tmpdir = tmpdir.rstrip('/')
if tmpdir in self.get_option('system_tmpdirs'):
basetmpdir = tmpdir
else:
basetmpdir = self.get_option('system_tmpdirs')[0]
else:
if tmpdir is None:
basetmpdir = self.get_option('remote_tmp')
else:
basetmpdir = tmpdir
basetmp = self.join_path(basetmpdir, basefile)
# use mkdir -p to ensure parents exist, but mkdir fullpath to ensure last one is created by us
cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmpdir, self._SHELL_SUB_RIGHT)
cmd += '%s mkdir %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
cmd += ' %s echo %s=%s echo %s %s' % (self._SHELL_AND, basefile, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
tmp_umask = 0o777 & ~mode
cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
return cmd
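    # With the sh-family defaults (backquote substitution, '&&') and mode=0o700
    # the string built above comes out roughly as (paths illustrative):
    #
    #   ( umask 77 && mkdir -p "` echo ~/.ansible/tmp `"&& mkdir
    #     "` echo ~/.ansible/tmp/ansible-tmp-X `" && echo
    #     ansible-tmp-X="` echo ~/.ansible/tmp/ansible-tmp-X `" )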
def expand_user(self, user_home_path, username=''):
""" Return a command to expand tildes in a path
It can be either "~" or "~username". We just ignore $HOME
We use the POSIX definition of a username:
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
Falls back to 'current working directory' as we assume 'home is where the remote user ends up'
"""
# Check that the user_path to expand is safe
if user_home_path != '~':
if not _USER_HOME_PATH_RE.match(user_home_path):
user_home_path = self.quote(user_home_path)
elif username:
# if present the user name is appended to resolve "that user's home"
user_home_path += username
return 'echo %s' % user_home_path
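    # e.g. expand_user('~') -> "echo ~"; expand_user('~', 'alice') and
    # expand_user('~alice') both -> "echo ~alice". Anything failing the
    # username regex is quoted so the remote shell leaves it unexpanded.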
def pwd(self):
"""Return the working directory after connecting"""
return 'echo %spwd%s' % (self._SHELL_SUB_LEFT, self._SHELL_SUB_RIGHT)
def build_module_command(self, env_string, shebang, cmd, arg_path=None):
env_string = env_string.strip()
if env_string:
env_string += ' '
if shebang is None:
shebang = ''
cmd_parts = [
shebang.removeprefix('#!').strip(),
cmd.strip(),
arg_path,
]
cleaned_up_cmd = self.join(
stripped_cmd_part for raw_cmd_part in cmd_parts
if raw_cmd_part and (stripped_cmd_part := raw_cmd_part.strip())
)
return ''.join((env_string, cleaned_up_cmd))
def append_command(self, cmd, cmd_to_append):
"""Append an additional command if supported by the shell"""
if self._SHELL_AND:
cmd += ' %s %s' % (self._SHELL_AND, cmd_to_append)
return cmd
def wrap_for_exec(self, cmd):
"""wrap script execution with any necessary decoration (eg '&' for quoted powershell script paths)"""
return cmd
def quote(self, cmd):
"""Returns a shell-escaped string that can be safely used as one token in a shell command line"""
return shlex.quote(cmd)
def join(self, cmd_parts):
"""Returns a shell-escaped string from a list that can be safely used in a shell command line"""
return shlex.join(cmd_parts)
| 8,985 | Python | .py | 180 | 41.538889 | 138 | 0.643444 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,412 | powershell.py | ansible_ansible/lib/ansible/plugins/shell/powershell.py |
# Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = '''
name: powershell
version_added: historical
short_description: Windows PowerShell
description:
- The only option when using 'winrm' or 'psrp' as a connection plugin.
- Can also be used when using 'ssh' as a connection plugin and the C(DefaultShell) has been configured to PowerShell.
extends_documentation_fragment:
- shell_windows
'''
import base64
import os
import re
import shlex
import pkgutil
import xml.etree.ElementTree as ET
import ntpath
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.shell import ShellBase
# This is weird, we are matching on byte sequences that match the utf-16-be
# matches for '_x(a-fA-F0-9){4}_'. The \x00 and {8} will match the hex sequence
# when it is encoded as utf-16-be.
_STRING_DESERIAL_FIND = re.compile(rb"\x00_\x00x([\x00(a-fA-F0-9)]{8})\x00_")
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
def _parse_clixml(data: bytes, stream: str = "Error") -> bytes:
"""
Takes a byte string like '#< CLIXML\r\n<Objs...' and extracts the stream
message encoded in the XML data. CLIXML is used by PowerShell to encode
multiple objects in stderr.
"""
lines: list[str] = []
# A serialized string will serialize control chars and surrogate pairs as
# _xDDDD_ values where DDDD is the hex representation of a big endian
# UTF-16 code unit. As a surrogate pair uses 2 UTF-16 code units, we need
# to operate our text replacement on the utf-16-be byte encoding of the raw
# text. This allows us to replace the _xDDDD_ values with the actual byte
# values and then decode that back to a string from the utf-16-be bytes.
def rplcr(matchobj: re.Match) -> bytes:
match_hex = matchobj.group(1)
hex_string = match_hex.decode("utf-16-be")
return base64.b16decode(hex_string.upper())
# There are some scenarios where the stderr contains a nested CLIXML element like
    # '#< CLIXML\r\n#< CLIXML\r\n<Objs>...</Objs><Objs>...</Objs>'.
# Parse each individual <Objs> element and add the error strings to our stderr list.
# https://github.com/ansible/ansible/issues/69550
while data:
start_idx = data.find(b"<Objs ")
end_idx = data.find(b"</Objs>")
if start_idx == -1 or end_idx == -1:
break
end_idx += 7
current_element = data[start_idx:end_idx]
data = data[end_idx:]
clixml = ET.fromstring(current_element)
namespace_match = re.match(r'{(.*)}', clixml.tag)
namespace = f"{{{namespace_match.group(1)}}}" if namespace_match else ""
entries = clixml.findall("./%sS" % namespace)
if not entries:
continue
# If this is a new CLIXML element, add a newline to separate the messages.
if lines:
lines.append("\r\n")
for string_entry in entries:
actual_stream = string_entry.attrib.get('S', None)
if actual_stream != stream:
continue
b_line = (string_entry.text or "").encode("utf-16-be")
b_escaped = re.sub(_STRING_DESERIAL_FIND, rplcr, b_line)
lines.append(b_escaped.decode("utf-16-be", errors="surrogatepass"))
return to_bytes(''.join(lines), errors="surrogatepass")
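def _demo_parse_clixml() -> bytes:  # pragma: no cover
    """Illustrative usage of _parse_clixml; not called by the plugin.

    The sample payload below is hand-crafted; the _x000D__x000A_ escapes
    decode back to CRLF via the _STRING_DESERIAL_FIND substitution above.
    """
    sample = (
        b"#< CLIXML\r\n"
        b'<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
        b'<S S="Error">Something failed_x000D__x000A_</S></Objs>'
    )
    return _parse_clixml(sample)  # -> b'Something failed\r\n'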
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset() # type: frozenset[str]
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'
# We try catch as some connection plugins don't have a console (PSRP).
_CONSOLE_ENCODING = "try { [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding } catch {}"
_SHELL_REDIRECT_ALLNULL = '> $null'
_SHELL_AND = ';'
# Used by various parts of Ansible to do Windows specific changes
_IS_WINDOWS = True
# TODO: add binary module support
def env_prefix(self, **kwargs):
# powershell/winrm env handling is handled in the exec wrapper
return ""
def join_path(self, *args):
# use normpath() to remove doubled slashed and convert forward to backslashes
parts = [ntpath.normpath(self._unquote(arg)) for arg in args]
# Because ntpath.join treats any component that begins with a backslash as an absolute path,
# we have to strip slashes from at least the beginning, otherwise join will ignore all previous
# path components except for the drive.
return ntpath.join(parts[0], *[part.strip('\\') for part in parts[1:]])
def get_remote_filename(self, pathname):
# powershell requires that script files end with .ps1
base_name = os.path.basename(pathname.strip())
name, ext = os.path.splitext(base_name.strip())
if ext.lower() not in ['.ps1', '.exe']:
return name + '.ps1'
return base_name.strip()
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
path = self._unquote(path)
return path.endswith('/') or path.endswith('\\')
def chmod(self, paths, mode):
raise NotImplementedError('chmod is not implemented for Powershell')
def chown(self, paths, user):
raise NotImplementedError('chown is not implemented for Powershell')
def set_user_facl(self, paths, user, mode):
raise NotImplementedError('set_user_facl is not implemented for Powershell')
def remove(self, path, recurse=False):
path = self._escape(self._unquote(path))
if recurse:
return self._encode_script('''Remove-Item '%s' -Force -Recurse;''' % path)
else:
return self._encode_script('''Remove-Item '%s' -Force;''' % path)
def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
# Windows does not have an equivalent for the system temp files, so
# the param is ignored
if not basefile:
basefile = self.__class__._generate_temp_dir_name()
basefile = self._escape(self._unquote(basefile))
basetmpdir = self._escape(tmpdir if tmpdir else self.get_option('remote_tmp'))
script = f'''
{self._CONSOLE_ENCODING}
$tmp_path = [System.Environment]::ExpandEnvironmentVariables('{basetmpdir}')
$tmp = New-Item -Type Directory -Path $tmp_path -Name '{basefile}'
Write-Output -InputObject $tmp.FullName
'''
return self._encode_script(script.strip())
def expand_user(self, user_home_path, username=''):
# PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
# not seem to work remotely, though by default we are always starting
# in the user's home directory.
user_home_path = self._unquote(user_home_path)
if user_home_path == '~':
script = 'Write-Output (Get-Location).Path'
elif user_home_path.startswith('~\\'):
script = "Write-Output ((Get-Location).Path + '%s')" % self._escape(user_home_path[1:])
else:
script = "Write-Output '%s'" % self._escape(user_home_path)
return self._encode_script(f"{self._CONSOLE_ENCODING}; {script}")
def exists(self, path):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path '%s')
{
$res = 0;
}
Else
{
$res = 1;
}
Write-Output '$res';
Exit $res;
''' % path
return self._encode_script(script)
def checksum(self, path, *args, **kwargs):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path -PathType Leaf '%(path)s')
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open('%(path)s', [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container '%(path)s')
{
Write-Output "3";
}
Else
{
Write-Output "1";
}
''' % dict(path=path)
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, arg_path=None):
bootstrap_wrapper = pkgutil.get_data("ansible.executor.powershell", "bootstrap_wrapper.ps1")
# pipelining bypass
if cmd == '':
return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
# non-pipelining
cmd_parts = shlex.split(cmd, posix=False)
cmd_parts = list(map(to_text, cmd_parts))
if shebang and shebang.lower() == '#!powershell':
if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
# we're running a module via the bootstrap wrapper
cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
return wrapper_cmd
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
elif not shebang:
# The module is assumed to be a binary
cmd_parts[0] = self._unquote(cmd_parts[0])
cmd_parts.append(arg_path)
script = '''
Try
{
%s
%s
}
Catch
{
$_obj = @{ failed = $true }
If ($_.Exception.GetType)
{
$_obj.Add('msg', $_.Exception.Message)
}
Else
{
$_obj.Add('msg', $_.ToString())
}
If ($_.InvocationInfo.PositionMessage)
{
$_obj.Add('exception', $_.InvocationInfo.PositionMessage)
}
ElseIf ($_.ScriptStackTrace)
{
$_obj.Add('exception', $_.ScriptStackTrace)
}
Try
{
$_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
}
Catch
{
}
Echo $_obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
''' % (env_string, ' '.join(cmd_parts))
return self._encode_script(script, preserve_rc=False)
def wrap_for_exec(self, cmd):
return '& %s; exit $LASTEXITCODE' % cmd
def _unquote(self, value):
"""Remove any matching quotes that wrap the given value."""
value = to_text(value or '')
m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
if m:
return m.group(1)
m = re.match(r'^\s*?"(.*?)"\s*?$', value)
if m:
return m.group(1)
return value
def _escape(self, value):
"""Return value escaped for use in PowerShell single quotes."""
# There are 5 chars that need to be escaped in a single quote.
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
return re.compile(u"(['\u2018\u2019\u201a\u201b])").sub(u'\\1\\1', value)
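    # e.g. _escape(u"it's") -> u"it''s": each single-quote variant is doubled,
    # which is how PowerShell escapes quotes inside single-quoted strings.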
def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
"""Convert a PowerShell script to a single base64-encoded command."""
script = to_text(script)
if script == u'-':
cmd_parts = _common_args + ['-Command', '-']
else:
if strict_mode:
script = u'Set-StrictMode -Version Latest\r\n%s' % script
# try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
# NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
if preserve_rc:
script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
% script
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
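    # Illustrative round trip (the script text is an assumption): with the
    # defaults, _encode_script(u'Write-Output hi') prepends the Set-StrictMode
    # line, appends the $LASTEXITCODE propagation block, base64-encodes the
    # UTF-16-LE script text, and returns:
    #   PowerShell -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -EncodedCommand <base64>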
| 13,134 | Python | .py | 275 | 38.025455 | 167 | 0.602966 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,413 | cmd.py | ansible_ansible/lib/ansible/plugins/shell/cmd.py |
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: cmd
version_added: '2.8'
short_description: Windows Command Prompt
description:
- Used with the 'ssh' connection plugin and no C(DefaultShell) has been set on the Windows host.
extends_documentation_fragment:
- shell_windows
"""
import re
from ansible.plugins.shell.powershell import ShellModule as PSShellModule
# these are the metachars that have a special meaning in cmd that we want to escape when quoting
_find_unsafe = re.compile(r'[\s\(\)\%\!^\"\<\>\&\|]').search
class ShellModule(PSShellModule):
# Common shell filenames that this plugin handles
COMPATIBLE_SHELLS = frozenset() # type: frozenset[str]
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'cmd'
_SHELL_REDIRECT_ALLNULL = '>nul 2>&1'
_SHELL_AND = '&&'
# Used by various parts of Ansible to do Windows specific changes
_IS_WINDOWS = True
def quote(self, cmd):
# cmd does not support single quotes that the shlex_quote uses. We need to override the quoting behaviour to
# better match cmd.exe.
# https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
# Return an empty argument
if not cmd:
return '""'
if _find_unsafe(cmd) is None:
return cmd
# Escape the metachars as we are quoting the string to stop cmd from interpreting that metachar. For example
# 'file &whoami.exe' would result in 'file $(whoami.exe)' instead of the literal string
# https://stackoverflow.com/questions/3411771/multiple-character-replace-with-python
for c in '^()%!"<>&|': # '^' must be the first char that we scan and replace
if c in cmd:
# I can't find any docs that explicitly say this but to escape ", it needs to be prefixed with \^.
cmd = cmd.replace(c, ("\\^" if c == '"' else "^") + c)
return '^"' + cmd + '^"'
| 2,170 | Python | .py | 42 | 45.761905 | 136 | 0.683065 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,414 | password.py | ansible_ansible/lib/ansible/plugins/lookup/password.py |
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2013, Javier Candeira <javier@candeira.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: password
version_added: "1.1"
author:
- Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
- Javier Candeira (!UNKNOWN) <javier@candeira.com>
- Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
short_description: retrieve or generate a random password, stored in a file
description:
- Generates a random plaintext password and stores it in a file at a given filepath.
- If the file exists previously, it will retrieve its contents, behaving just like with_file.
- 'Usage of variables like C("{{ inventory_hostname }}") in the filepath can be used to set up random passwords per host,
which simplifies password management in C("host_vars") variables.'
- A special case is using /dev/null as a path. The password lookup will generate a new random password each time,
but will not write it to /dev/null. This can be used when you need a password without storing it on the controller.
options:
_terms:
description:
- path to the file that stores/will store the passwords
required: True
encrypt:
description:
- Which hash scheme to encrypt the returning password, should be one hash scheme from C(passlib.hash);
V(md5_crypt), V(bcrypt), V(sha256_crypt), V(sha512_crypt).
- If not provided, the password will be returned in plain text.
- Note that the password is always stored as plain text, only the returning password is encrypted.
- Encrypt also forces saving the salt value for idempotence.
- Note that before 2.6 this option was incorrectly labeled as a boolean for a long time.
ident:
description:
- Specify version of Bcrypt algorithm to be used while using O(encrypt) as V(bcrypt).
- The parameter is only available for V(bcrypt) - U(https://passlib.readthedocs.io/en/stable/lib/passlib.hash.bcrypt.html#passlib.hash.bcrypt).
- Other hash types will simply ignore this parameter.
- 'Valid values for this parameter are: V(2), V(2a), V(2y), V(2b).'
type: string
version_added: "2.12"
chars:
version_added: "1.4"
description:
- A list of names that compose a custom character set in the generated passwords.
- This parameter defines the possible character sets in the resulting password, not the required character sets.
If you want to require certain character sets for passwords, you can use the P(community.general.random_string#lookup) lookup plugin.
- 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9, and punctuation (". , : - _").'
- "They can be either parts of Python's string module attributes or represented literally ( :, -)."
- "Though string modules can vary by Python version, valid values for both major releases include:
'ascii_lowercase', 'ascii_uppercase', 'digits', 'hexdigits', 'octdigits', 'printable', 'punctuation' and 'whitespace'."
- Be aware that Python's 'hexdigits' includes lower and upper case versions of a-f, so it is not a good choice as it doubles
the chances of those values for systems that won't distinguish case, distorting the expected entropy.
- "when using a comma separated string, to enter comma use two commas ',,' somewhere - preferably at the end.
Quotes and double quotes are not supported."
type: list
elements: str
default: ['ascii_letters', 'digits', ".,:-_"]
length:
description: The length of the generated password.
default: 20
type: integer
seed:
version_added: "2.12"
description:
- A seed to initialize the random number generator.
- Identical seeds will yield identical passwords.
- Use this for random-but-idempotent password generation.
type: str
notes:
- A great alternative to the password lookup plugin,
if you don't need to generate random passwords on a per-host basis,
would be to use Vault in playbooks.
Read the documentation there and consider using it first,
it will be more desirable for most applications.
- If the file already exists, no data will be written to it.
If the file has contents, those contents will be read in as the password.
Empty files cause the password to return as an empty string.
- 'As all lookups, this runs on the Ansible host as the user running the playbook, and "become" does not apply,
the target file must be readable by the playbook user, or, if it does not exist,
the playbook user must have sufficient privileges to create it.
(So, for example, attempts to write into areas such as /etc will fail unless the entire playbook is being run as root).'
"""
EXAMPLES = """
- name: create a mysql user with a random password
community.mysql.mysql_user:
name: "{{ client }}"
password: "{{ lookup('ansible.builtin.password', 'credentials/' + client + '/' + tier + '/' + role + '/mysqlpassword', length=15) }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using only ascii letters
community.mysql.mysql_user:
name: "{{ client }}"
password: "{{ lookup('ansible.builtin.password', '/tmp/passwordfile', chars=['ascii_letters']) }}"
priv: '{{ client }}_{{ tier }}_{{ role }}.*:ALL'
- name: create a mysql user with an 8 character random password using only digits
community.mysql.mysql_user:
name: "{{ client }}"
password: "{{ lookup('ansible.builtin.password', '/tmp/passwordfile', length=8, chars=['digits']) }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using many different char sets
community.mysql.mysql_user:
name: "{{ client }}"
password: "{{ lookup('ansible.builtin.password', '/tmp/passwordfile', chars=['ascii_letters', 'digits', 'punctuation']) }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create lowercase 8 character name for Kubernetes pod name
ansible.builtin.set_fact:
random_pod_name: "web-{{ lookup('ansible.builtin.password', '/dev/null', chars=['ascii_lowercase', 'digits'], length=8) }}"
- name: create random but idempotent password
ansible.builtin.set_fact:
password: "{{ lookup('ansible.builtin.password', '/dev/null', seed=inventory_hostname) }}"
"""
RETURN = """
_raw:
description:
- a password
type: list
elements: str
"""
import os
import string
import time
import hashlib
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import string_types
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
from ansible.utils.encrypt import BaseHash, do_encrypt, random_password, random_salt
from ansible.utils.path import makedirs_safe
VALID_PARAMS = frozenset(('length', 'encrypt', 'chars', 'ident', 'seed'))
def _read_password_file(b_path):
"""Read the contents of a password file and return it
:arg b_path: A byte string containing the path to the password file
:returns: a text string containing the contents of the password file or
None if no password file was present.
"""
content = None
if os.path.exists(b_path):
with open(b_path, 'rb') as f:
b_content = f.read().rstrip()
content = to_text(b_content, errors='surrogate_or_strict')
return content
def _gen_candidate_chars(characters):
"""Generate a string containing all valid chars as defined by ``characters``
:arg characters: A list of character specs. The character specs are
shorthand names for sets of characters like 'digits', 'ascii_letters',
or 'punctuation' or a string to be included verbatim.
The values of each char spec can be:
* a name of an attribute in the 'strings' module ('digits' for example).
The value of the attribute will be added to the candidate chars.
* a string of characters. If the string isn't an attribute in 'string'
module, the string will be directly added to the candidate chars.
For example::
characters=['digits', '?|']``
will match ``string.digits`` and add all ascii digits. ``'?|'`` will add
the question mark and pipe characters directly. Return will be the string::
u'0123456789?|'
"""
chars = []
for chars_spec in characters:
# getattr from string expands things like "ascii_letters" and "digits"
# into a set of characters.
chars.append(to_text(getattr(string, to_native(chars_spec), chars_spec), errors='strict'))
chars = u''.join(chars).replace(u'"', u'').replace(u"'", u'')
return chars
def _parse_content(content):
"""parse our password data format into password and salt
:arg content: The data read from the file
:returns: password and salt
"""
password = content
salt = None
ident = None
salt_slug = u' salt='
ident_slug = u' ident='
rem = u''
try:
sep = content.rindex(salt_slug)
except ValueError:
# No salt
pass
else:
rem = content[sep + len(salt_slug):]
password = content[:sep]
if rem:
try:
sep = rem.rindex(ident_slug)
except ValueError:
# no ident
salt = rem
else:
ident = rem[sep + len(ident_slug):]
salt = rem[:sep]
return password, salt, ident
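# Example of the on-disk format handled by _parse_content (values illustrative):
#   _parse_content(u'Sup3rS3cret salt=p9Qz7 ident=2b')
#   -> (u'Sup3rS3cret', u'p9Qz7', u'2b')
# Content without a ' salt=' marker comes back unchanged as (content, None, None).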
def _format_content(password, salt, encrypt=None, ident=None):
"""Format the password and salt for saving
:arg password: the plaintext password to save
:arg salt: the salt to use when encrypting a password
:arg encrypt: Which method the user requests that this password is encrypted.
Note that the password is saved in clear. Encrypt just tells us if we
must save the salt value for idempotence. Defaults to None.
:arg ident: Which version of BCrypt algorithm to be used.
Valid only if value of encrypt is bcrypt.
Defaults to None.
:returns: a text string containing the formatted information
.. warning:: Passwords are saved in clear. This is because the playbooks
expect to get cleartext passwords from this lookup.
"""
if not encrypt and not salt:
return password
# At this point, the calling code should have assured us that there is a salt value.
if not salt:
raise AnsibleAssertionError('_format_content was called with encryption requested but no salt value')
if ident:
return u'%s salt=%s ident=%s' % (password, salt, ident)
return u'%s salt=%s' % (password, salt)
def _write_password_file(b_path, content):
b_pathdir = os.path.dirname(b_path)
makedirs_safe(b_pathdir, mode=0o700)
with open(b_path, 'wb') as f:
os.chmod(b_path, 0o600)
b_content = to_bytes(content, errors='surrogate_or_strict') + b'\n'
f.write(b_content)
def _get_lock(b_path):
"""Get the lock for writing password file."""
first_process = False
b_pathdir = os.path.dirname(b_path)
lockfile_name = to_bytes("%s.ansible_lockfile" % hashlib.sha1(b_path).hexdigest())
lockfile = os.path.join(b_pathdir, lockfile_name)
if not os.path.exists(lockfile) and b_path != to_bytes('/dev/null'):
try:
makedirs_safe(b_pathdir, mode=0o700)
fd = os.open(lockfile, os.O_CREAT | os.O_EXCL)
os.close(fd)
first_process = True
except OSError as e:
if e.strerror != 'File exists':
raise
counter = 0
# if the lock is got by other process, wait until it's released
while os.path.exists(lockfile) and not first_process:
time.sleep(2 ** counter)
if counter >= 2:
raise AnsibleError("Password lookup cannot get the lock in 7 seconds, abort..."
"This may caused by un-removed lockfile"
"you can manually remove it from controller machine at %s and try again" % lockfile)
counter += 1
return first_process, lockfile
def _release_lock(lockfile):
"""Release the lock so other processes can read the password file."""
if os.path.exists(lockfile):
os.remove(lockfile)
class LookupModule(LookupBase):
def _parse_parameters(self, term):
"""Hacky parsing of params
See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
and the first_found lookup For how we want to fix this later
"""
first_split = term.split(' ', 1)
if len(first_split) <= 1:
# Only a single argument given, therefore it's a path
relpath = term
params = dict()
else:
relpath = first_split[0]
params = parse_kv(first_split[1])
if '_raw_params' in params:
# Spaces in the path?
relpath = u' '.join((relpath, params['_raw_params']))
del params['_raw_params']
# Check that we parsed the params correctly
if not term.startswith(relpath):
# Likely, the user had a non parameter following a parameter.
# Reject this as a user typo
raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
# No _raw_params means we already found the complete path when
# we split it initially
# Check for invalid parameters. Probably a user typo
invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
if invalid_params:
raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))
# update options with what we got
if params:
self.set_options(direct=params)
# chars still might need more
chars = params.get('chars', self.get_option('chars'))
if chars and isinstance(chars, string_types):
tmp_chars = []
if u',,' in chars:
tmp_chars.append(u',')
tmp_chars.extend(c for c in chars.replace(u',,', u',').split(u',') if c)
self.set_option('chars', tmp_chars)
# return processed params
for field in VALID_PARAMS:
params[field] = self.get_option(field)
return relpath, params
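    # e.g. the term "credentials/mysql length=8 chars=digits" (values
    # illustrative) parses to relpath 'credentials/mysql' with length=8 and
    # chars=['digits']; the remaining VALID_PARAMS are filled from options.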
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
self.set_options(var_options=variables, direct=kwargs)
changed = None
relpath, params = self._parse_parameters(term)
path = self._loader.path_dwim(relpath)
b_path = to_bytes(path, errors='surrogate_or_strict')
chars = _gen_candidate_chars(params['chars'])
ident = None
first_process = None
lockfile = None
try:
# make sure only one process finishes all the job first
first_process, lockfile = _get_lock(b_path)
content = _read_password_file(b_path)
if content is None or b_path == to_bytes('/dev/null'):
plaintext_password = random_password(params['length'], chars, params['seed'])
salt = None
changed = True
else:
plaintext_password, salt, ident = _parse_content(content)
encrypt = params['encrypt']
if encrypt and not salt:
changed = True
try:
salt = random_salt(BaseHash.algorithms[encrypt].salt_size)
except KeyError:
salt = random_salt()
if not ident:
ident = params['ident']
elif params['ident'] and ident != params['ident']:
raise AnsibleError('The ident parameter provided (%s) does not match the stored one (%s).' % (ident, params['ident']))
if encrypt and not ident:
try:
ident = BaseHash.algorithms[encrypt].implicit_ident
except KeyError:
ident = None
if ident:
changed = True
if changed and b_path != to_bytes('/dev/null'):
content = _format_content(plaintext_password, salt, encrypt=encrypt, ident=ident)
_write_password_file(b_path, content)
finally:
if first_process:
# let other processes continue
_release_lock(lockfile)
if encrypt:
password = do_encrypt(plaintext_password, encrypt, salt=salt, ident=ident)
ret.append(password)
else:
ret.append(plaintext_password)
return ret
| 17,585 | Python | .py | 350 | 41.02 | 153 | 0.637153 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,415 | template.py | ansible_ansible/lib/ansible/plugins/lookup/template.py |
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2012-17, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: template
author: Michael DeHaan
version_added: "0.9"
short_description: retrieve contents of file after templating with Jinja2
description:
- Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
options:
_terms:
description: list of files to template
convert_data:
type: bool
description:
- Whether to convert YAML into data. If False, strings that are YAML will be left untouched.
- Mutually exclusive with the jinja2_native option.
default: true
variable_start_string:
description: The string marking the beginning of a print statement.
default: '{{'
version_added: '2.8'
type: str
variable_end_string:
description: The string marking the end of a print statement.
default: '}}'
version_added: '2.8'
type: str
jinja2_native:
description:
- Controls whether to use Jinja2 native types.
- It is off by default even if global jinja2_native is True.
- Has no effect if global jinja2_native is False.
- This offers more flexibility than the template module which does not use Jinja2 native types at all.
- Mutually exclusive with the convert_data option.
default: False
version_added: '2.11'
type: bool
template_vars:
description: A dictionary, the keys become additional variables available for templating.
default: {}
version_added: '2.3'
type: dict
comment_start_string:
description: The string marking the beginning of a comment statement.
version_added: '2.12'
type: str
default: '{#'
comment_end_string:
description: The string marking the end of a comment statement.
version_added: '2.12'
type: str
default: '#}'
seealso:
- ref: playbook_task_paths
description: Search paths used for relative templates.
"""
EXAMPLES = """
- name: show templating results
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.template', './some_template.j2') }}"
- name: show templating results with different variable start and end string
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
- name: show templating results with different comment start and end string
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.template', './some_template.j2', comment_start_string='[#', comment_end_string='#]') }}"
"""
RETURN = """
_raw:
description: file(s) content after templating
type: list
elements: raw
"""
from copy import deepcopy
import os
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common.text.converters import to_text
from ansible.template import generate_ansible_template_vars, AnsibleEnvironment
from ansible.utils.display import Display
from ansible.utils.native_jinja import NativeJinjaText
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
self.set_options(var_options=variables, direct=kwargs)
# capture options
convert_data_p = self.get_option('convert_data')
lookup_template_vars = self.get_option('template_vars')
jinja2_native = self.get_option('jinja2_native') and C.DEFAULT_JINJA2_NATIVE
variable_start_string = self.get_option('variable_start_string')
variable_end_string = self.get_option('variable_end_string')
comment_start_string = self.get_option('comment_start_string')
comment_end_string = self.get_option('comment_end_string')
if jinja2_native:
templar = self._templar
else:
templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment)
for term in terms:
display.debug("File lookup term: %s" % term)
lookupfile = self.find_file_in_search_path(variables, 'templates', term)
display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile:
b_template_data, show_data = self._loader._get_file_contents(lookupfile)
template_data = to_text(b_template_data, errors='surrogate_or_strict')
# set jinja2 internal search path for includes
searchpath = variables.get('ansible_search_path', [])
if searchpath:
# our search paths aren't actually the proper ones for jinja includes.
# We want to search into the 'templates' subdir of each search path in
# addition to our original search paths.
newsearchpath = []
for p in searchpath:
newsearchpath.append(os.path.join(p, 'templates'))
newsearchpath.append(p)
searchpath = newsearchpath
searchpath.insert(0, os.path.dirname(lookupfile))
# The template will have access to all existing variables,
# plus some added by ansible (e.g., template_{path,mtime}),
# plus anything passed to the lookup with the template_vars=
# argument.
vars = deepcopy(variables)
vars.update(generate_ansible_template_vars(term, lookupfile))
vars.update(lookup_template_vars)
with templar.set_temporary_context(available_variables=vars, searchpath=searchpath):
overrides = dict(
variable_start_string=variable_start_string,
variable_end_string=variable_end_string,
comment_start_string=comment_start_string,
comment_end_string=comment_end_string
)
res = templar.template(template_data, preserve_trailing_newlines=True,
convert_data=convert_data_p, escape_backslashes=False,
overrides=overrides)
if (C.DEFAULT_JINJA2_NATIVE and not jinja2_native) or not convert_data_p:
# jinja2_native is true globally but off for the lookup, we need this text
# not to be processed by literal_eval anywhere in Ansible
res = NativeJinjaText(res)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
return ret
size: 7,112 | language: Python | extension: .py | total_lines: 146 | avg_line_length: 38.130137 | max_line_length: 159 | alphanum_fraction: 0.636744 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
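The delimiter overrides shown in the template lookup EXAMPLES above map one-to-one onto Jinja2 Environment settings. A minimal standalone sketch (an editor's illustration, not part of the plugin; assumes the third-party jinja2 package is installed):

from jinja2 import Environment

# Same non-default delimiters as the lookup examples above:
# '[% %]' for variables and '[# #]' for comments.
env = Environment(
    variable_start_string='[%',
    variable_end_string='%]',
    comment_start_string='[#',
    comment_end_string='#]',
)
print(env.from_string("[# ignored #]Hello [% name %]!").render(name='world'))
# -> Hello world!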
id: 14,416 | file_name: csvfile.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/csvfile.py | content:
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
name: csvfile
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.5"
short_description: read data from a TSV or CSV file
description:
- The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
The lookup looks for the row where the first column matches keyname (which can be multiple words)
and returns the value in the O(col) column (default 1; columns are indexed from 0, so that is the second column in the file).
- At least one keyname is required, provided as a positional argument(s) to the lookup.
options:
col:
description: column to return (0 indexed).
default: "1"
keycol:
description: column to search in (0 indexed).
default: 0
type: int
version_added: "2.17"
default:
description: what to return if the value is not found in the file.
delimiter:
description: field separator in the file, for a tab you can specify V(TAB) or V(\\t).
default: TAB
file:
description: name of the CSV/TSV file to open.
default: ansible.csv
encoding:
description: Encoding (character set) of the used CSV file.
default: utf-8
version_added: "2.1"
notes:
- The default is for TSV files (tab delimited) not CSV (comma delimited) ... yes the name is misleading.
- As of version 2.11, the search parameter (text that must match the first column of the file) and filename parameter can be multi-word.
- For historical reasons, in the search keyname, quotes are treated
literally and cannot be used around the string unless they appear
(escaped as required) in the first column of the file you are parsing.
seealso:
- ref: playbook_task_paths
description: Search paths used for relative files.
"""
EXAMPLES = """
- name: Match 'Li' on the first column, return the second column (0 based index)
ansible.builtin.debug: msg="The atomic number of Lithium is {{ lookup('ansible.builtin.csvfile', 'Li file=elements.csv delimiter=,') }}"
- name: Match 'Li' on the first column, but return the 3rd column (0 based index)
ansible.builtin.debug: msg="The atomic mass of Lithium is {{ lookup('ansible.builtin.csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
# Contents of bgp_neighbors.csv
# 127.0.0.1,10.0.0.1,24,nones,lola,pepe,127.0.0.2
# 128.0.0.1,10.1.0.1,20,notes,lolita,pepito,128.0.0.2
# 129.0.0.1,10.2.0.1,23,nines,aayush,pepete,129.0.0.2
- name: Define values from CSV file; this reads the file in one go, but you could also use col= to read each in its own lookup.
ansible.builtin.set_fact:
'{{ columns[item|int] }}': "{{ csvline }}"
vars:
csvline: "{{ lookup('csvfile', bgp_neighbor_ip, file='bgp_neighbors.csv', delimiter=',', col=item) }}"
columns: ['loop_ip', 'int_ip', 'int_mask', 'int_name', 'local_as', 'neighbour_as', 'neight_int_ip']
bgp_neighbor_ip: '127.0.0.1'
loop: '{{ range(columns|length|int) }}'
delegate_to: localhost
delegate_facts: true
# Contents of people.csv
# # Last,First,Email,Extension
# Smith,Jane,jsmith@example.com,1234
- name: Specify the column (by keycol) in which the string should be searched
assert:
that:
- lookup('ansible.builtin.csvfile', 'Jane', file='people.csv', delimiter=',', col=0, keycol=1) == "Smith"
# Contents of debug.csv
# test1 ret1.1 ret2.1
# test2 ret1.2 ret2.2
# test3 ret1.3 ret2.3
- name: "Lookup multiple keynames in the first column (index 0), returning the values from the second column (index 1)"
debug:
msg: "{{ lookup('csvfile', 'test1', 'test2', file='debug.csv', delimiter=' ') }}"
- name: Lookup multiple keynames using old style syntax
debug:
msg: "{{ lookup('csvfile', term1, term2) }}"
vars:
term1: "test1 file=debug.csv delimiter=' '"
term2: "test2 file=debug.csv delimiter=' '"
"""
RETURN = """
_raw:
description:
- value(s) stored in file column
type: list
elements: str
"""
import codecs
import csv
from collections.abc import MutableSequence
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.six import PY2
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
class CSVRecoder:
"""
Iterator that reads an encoded stream and encodes the input to UTF-8
"""
def __init__(self, f, encoding='utf-8'):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def __next__(self):
return next(self.reader).encode("utf-8")
next = __next__ # For Python 2
class CSVReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
if PY2:
f = CSVRecoder(f, encoding)
else:
f = codecs.getreader(encoding)(f)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def __next__(self):
row = next(self.reader)
return [to_text(s) for s in row]
next = __next__ # For Python 2
def __iter__(self):
return self
class LookupModule(LookupBase):
def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1, keycol=0):
try:
f = open(to_bytes(filename), 'rb')
creader = CSVReader(f, delimiter=to_native(delimiter), encoding=encoding)
for row in creader:
if len(row) and row[keycol] == key:
return row[int(col)]
except Exception as e:
raise AnsibleError("csvfile: %s" % to_native(e))
return dflt
def run(self, terms, variables=None, **kwargs):
ret = []
self.set_options(var_options=variables, direct=kwargs)
# populate options
paramvals = self.get_options()
if not terms:
raise AnsibleError('Search key is required but was not found')
for term in terms:
kv = parse_kv(term)
if '_raw_params' not in kv:
raise AnsibleError('Search key is required but was not found')
key = kv['_raw_params']
# parameters override per term using k/v
try:
reset_params = False
for name, value in kv.items():
if name == '_raw_params':
continue
if name not in paramvals:
raise AnsibleAssertionError('%s is not a valid option' % name)
self._deprecate_inline_kv()
self.set_option(name, value)
reset_params = True
if reset_params:
paramvals = self.get_options()
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
# default is just placeholder for real tab
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'], paramvals['keycol'])
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
size: 7,928 | language: Python | extension: .py | total_lines: 178 | avg_line_length: 36.702247 | max_line_length: 156 | alphanum_fraction: 0.635337 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
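At its core, read_csv() above performs a keyed-row scan: return column col of the first row whose keycol cell equals the search key. A minimal standalone sketch of that behavior (hypothetical helper name, not the plugin API):

import csv

def lookup_csv(path, key, delimiter=',', keycol=0, col=1, default=None):
    # Return column 'col' of the first row whose 'keycol' equals 'key'.
    with open(path, newline='', encoding='utf-8') as f:
        for row in csv.reader(f, delimiter=delimiter):
            if row and row[keycol] == key:
                return row[col]
    return default

# e.g. lookup_csv('elements.csv', 'Li', col=2) returns the third column
# of the row whose first column is 'Li'.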
id: 14,417 | file_name: dict.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/dict.py | content:
# (c) 2014, Kent R. Spillner <kspillner@acm.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: dict
version_added: "1.5"
short_description: returns key/value pair items from dictionaries
description:
- Takes dictionaries as input and returns a list in which each item is a dictionary with 'key' and 'value' entries exposing one key/value pair of the original dictionary.
options:
_terms:
description:
- A list of dictionaries
required: True
"""
EXAMPLES = """
vars:
users:
alice:
name: Alice Appleworth
telephone: 123-456-7890
bob:
name: Bob Bananarama
telephone: 987-654-3210
tasks:
# with predefined vars
- name: Print phone records
ansible.builtin.debug:
msg: "User {{ item.key }} is {{ item.value.name }} ({{ item.value.telephone }})"
loop: "{{ lookup('ansible.builtin.dict', users) }}"
# with inline dictionary
- name: show dictionary
ansible.builtin.debug:
msg: "{{item.key}}: {{item.value}}"
with_dict: {a: 1, b: 2, c: 3}
# Items from loop can be used in when: statements
- name: set_fact when alice in key
ansible.builtin.set_fact:
alice_exists: true
loop: "{{ lookup('ansible.builtin.dict', users) }}"
when: "'alice' in item.key"
"""
RETURN = """
_list:
description:
- list of composed dictionaries with key and value
type: list
"""
from collections.abc import Mapping
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
# NOTE: can remove if with_ is removed
if not isinstance(terms, list):
terms = [terms]
results = []
for term in terms:
# Expect any type of Mapping, notably hostvars
if not isinstance(term, Mapping):
raise AnsibleError("with_dict expects a dict")
results.extend(self._flatten_hash_to_list(term))
return results
size: 2,198 | language: Python | extension: .py | total_lines: 65 | avg_line_length: 28 | max_line_length: 128 | alphanum_fraction: 0.653157 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
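The flattening this plugin delegates to _flatten_hash_to_list() (defined on LookupBase; see the __init__.py record further below) is simple to reproduce in isolation; a sketch:

users = {'alice': {'name': 'Alice'}, 'bob': {'name': 'Bob'}}
items = [{'key': k, 'value': v} for k, v in users.items()]
# items == [{'key': 'alice', 'value': {'name': 'Alice'}},
#           {'key': 'bob', 'value': {'name': 'Bob'}}]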
id: 14,418 | file_name: first_found.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/first_found.py | content:
# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: first_found
author: Seth Vidal (!UNKNOWN) <skvidal@fedoraproject.org>
version_added: historical
short_description: return first file found from list
description:
- This lookup checks a list of files and paths and returns the full path to the first combination found.
- Like all lookups, when fed relative paths it will try to use the current task's location first and go up the chain
to the containing locations of role / play / include and so on.
- The list of files has precedence over the paths searched.
For example, if a task in a role has a 'file1' in the play's relative path, that will be used, and 'file2' in the role's relative path will not.
- Either a list of files O(_terms) or a key O(files) with a list of files is required for this plugin to operate.
notes:
- This lookup can be used in 'dual mode', either passing a list of file names or a dictionary that has O(files) and O(paths).
options:
_terms:
description: A list of file names.
files:
description: A list of file names.
type: list
elements: string
default: []
paths:
description: A list of paths in which to look for the files.
type: list
elements: string
default: []
skip:
type: boolean
default: False
description:
- When V(True), return an empty list when no files are matched.
- This is useful when used with C(with_first_found), as an empty list return to C(with_) calls
causes the calling task to be skipped.
- When used as a template via C(lookup) or C(query), setting O(skip=True) will *not* cause the task to skip.
Tasks must handle the empty list return from the template.
- When V(False) and C(lookup) or C(query) specifies O(ignore:errors='ignore') all errors (including no file found,
but potentially others) return an empty string or an empty list respectively.
- When V(True) and C(lookup) or C(query) specifies O(ignore:errors='ignore'), no file found will return an empty
list and other potential errors return an empty string or empty list depending on the template call
(in other words return values of C(lookup) vs C(query)).
seealso:
- ref: playbook_task_paths
description: Search paths used for relative paths/files.
"""
EXAMPLES = """
- name: Set _found_file to the first existing file, raising an error if a file is not found
ansible.builtin.set_fact:
_found_file: "{{ lookup('ansible.builtin.first_found', findme) }}"
vars:
findme:
- /path/to/foo.txt
- bar.txt # will be looked in files/ dir relative to role and/or play
- /path/to/biz.txt
- name: Set _found_file to the first existing file, or an empty list if no files found
ansible.builtin.set_fact:
_found_file: "{{ lookup('ansible.builtin.first_found', files, paths=['/extra/path'], skip=True) }}"
vars:
files:
- /path/to/foo.txt
- /path/to/bar.txt
- name: Include tasks only if one of the files exist, otherwise skip the task
ansible.builtin.include_tasks:
file: "{{ item }}"
with_first_found:
- files:
- path/tasks.yaml
- path/other_tasks.yaml
skip: True
- name: Include tasks only if one of the files exists, otherwise skip
ansible.builtin.include_tasks: '{{ tasks_file }}'
when: tasks_file != ""
vars:
tasks_file: "{{ lookup('ansible.builtin.first_found', files=['tasks.yaml', 'other_tasks.yaml'], errors='ignore') }}"
- name: |
copy first existing file found to /some/file,
looking in relative directories from where the task is defined and
including any play objects that contain it
ansible.builtin.copy:
src: "{{ lookup('ansible.builtin.first_found', findme) }}"
dest: /some/file
vars:
findme:
- foo
- "{{ inventory_hostname }}"
- bar
- name: same copy but specific paths
ansible.builtin.copy:
src: "{{ lookup('ansible.builtin.first_found', params) }}"
dest: /some/file
vars:
params:
files:
- foo
- "{{ inventory_hostname }}"
- bar
paths:
- /tmp/production
- /tmp/staging
- name: INTERFACES | Create Ansible header for /etc/network/interfaces
ansible.builtin.template:
src: "{{ lookup('ansible.builtin.first_found', findme)}}"
dest: "/etc/foo.conf"
vars:
findme:
- "{{ ansible_virtualization_type }}_foo.conf"
- "default_foo.conf"
- name: read vars from first file found, use 'vars/' relative subdir
ansible.builtin.include_vars: "{{lookup('ansible.builtin.first_found', params)}}"
vars:
params:
files:
- '{{ ansible_distribution }}.yml'
- '{{ ansible_os_family }}.yml'
- default.yml
paths:
- 'vars'
"""
RETURN = """
_raw:
description:
- path to file found
type: list
elements: path
"""
import os
import re
from collections.abc import Mapping, Sequence
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleLookupError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
from ansible.utils.path import unfrackpath
def _split_on(terms, spliters=','):
termlist = []
if isinstance(terms, string_types):
termlist = re.split(r'[%s]' % ''.join(map(re.escape, spliters)), terms)
else:
# added since options will already listify
for t in terms:
termlist.extend(_split_on(t, spliters))
return termlist
class LookupModule(LookupBase):
def _process_terms(self, terms, variables, kwargs):
total_search = []
skip = False
# can use a dict instead of list item to pass inline config
for term in terms:
if isinstance(term, Mapping):
self.set_options(var_options=variables, direct=term)
files = self.get_option('files')
elif isinstance(term, string_types):
files = [term]
elif isinstance(term, Sequence):
partial, skip = self._process_terms(term, variables, kwargs)
total_search.extend(partial)
continue
else:
raise AnsibleLookupError("Invalid term supplied, can handle string, mapping or list of strings but got: %s for %s" % (type(term), term))
paths = self.get_option('paths')
# NOTE: this is used as 'global' but can be set many times?!?!?
skip = self.get_option('skip')
# magic extra splitting to create lists
filelist = _split_on(files, ',;')
pathlist = _split_on(paths, ',:;')
# create search structure
if pathlist:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
elif filelist:
# NOTE: this is now 'extend', previously it would clobber all options, but we deemed that a bug
total_search.extend(filelist)
else:
total_search.append(term)
return total_search, skip
def run(self, terms, variables, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
if not terms:
terms = self.get_option('files')
total_search, skip = self._process_terms(terms, variables, kwargs)
# NOTE: during refactor noticed that the 'using a dict' as term
# is designed to only work with 'one' otherwise inconsistencies will appear.
# see other notes below.
# actually search
subdir = getattr(self, '_subdir', 'files')
path = None
for fn in total_search:
try:
fn = self._templar.template(fn)
except (AnsibleUndefinedVariable, UndefinedError):
# NOTE: backwards compat ff behaviour is to ignore errors when vars are undefined.
# moved here from task_executor.
continue
# get subdir if set by task executor, default to files otherwise
path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
# exit if we find one!
if path is not None:
return [unfrackpath(path, follow=False)]
# if we get here, no file was found
if skip:
# NOTE: global skip won't matter, only last 'skip' value in dict term
return []
raise AnsibleLookupError("No file was found when using first_found.")
size: 8,985 | language: Python | extension: .py | total_lines: 209 | avg_line_length: 34.899522 | max_line_length: 152 | alphanum_fraction: 0.636728 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
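Stripped of templating and Ansible's search-path handling, the core of first_found is "first existing candidate wins". A simplified standalone sketch (hypothetical helper, not the plugin API):

import os

def first_found(files, paths=('.',), skip=False):
    # Build path/file combinations in order and return the first that exists.
    for path in paths:
        for name in files:
            candidate = os.path.join(path, name)
            if os.path.exists(candidate):
                return candidate
    if skip:
        return None  # mirrors skip=True returning an empty result
    raise FileNotFoundError('No file was found')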
id: 14,419 | file_name: fileglob.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/fileglob.py | content:
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: fileglob
author: Michael DeHaan
version_added: "1.4"
short_description: list files matching a pattern
description:
- Matches all files in a single directory, non-recursively, that match a pattern.
It calls Python's "glob" library.
options:
_terms:
description: path(s) of files to read
required: True
notes:
- Patterns are only supported on files, not directories/paths.
- See R(Ansible task paths,playbook_task_paths) to understand how file lookup occurs with paths.
- Matching is against local system files on the Ansible controller.
To iterate a list of files on a remote node, use the M(ansible.builtin.find) module.
- Returns a string list of paths joined by commas, or an empty list if no files match. For a 'true list' pass O(ignore:wantlist=True) to the lookup.
seealso:
- ref: playbook_task_paths
description: Search paths used for relative files.
"""
EXAMPLES = """
- name: Display paths of all .txt files in dir
ansible.builtin.debug: msg={{ lookup('ansible.builtin.fileglob', '/my/path/*.txt') }}
- name: Copy each file over that matches the given pattern
ansible.builtin.copy:
src: "{{ item }}"
dest: "/etc/fooapp/"
owner: "root"
mode: 0600
with_fileglob:
- "/playbooks/files/fooapp/*"
"""
RETURN = """
_list:
description:
- list of files
type: list
elements: path
"""
import os
import glob
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common.text.converters import to_bytes, to_text
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
term_file = os.path.basename(term)
found_paths = []
if term_file != term:
found_paths.append(self.find_file_in_search_path(variables, 'files', os.path.dirname(term)))
else:
# no dir, just file, so use paths and 'files' paths instead
if 'ansible_search_path' in variables:
paths = variables['ansible_search_path']
else:
paths = [self.get_basedir(variables)]
for p in paths:
found_paths.append(os.path.join(p, 'files'))
found_paths.append(p)
for dwimmed_path in found_paths:
if dwimmed_path:
globbed = glob.glob(to_bytes(os.path.join(dwimmed_path, term_file), errors='surrogate_or_strict'))
term_results = [to_text(g, errors='surrogate_or_strict') for g in globbed if os.path.isfile(g)]
if term_results:
ret.extend(term_results)
break
return ret
size: 3,056 | language: Python | extension: .py | total_lines: 74 | avg_line_length: 32.905405 | max_line_length: 154 | alphanum_fraction: 0.629081 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
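The matching itself is ordinary glob.glob() filtered down to regular files, as in run() above; the same idea in isolation:

import glob
import os

# Non-recursive match in a single directory; directories that happen to
# match the pattern are dropped, as the plugin does with os.path.isfile().
matches = [p for p in glob.glob('/my/path/*.txt') if os.path.isfile(p)]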
id: 14,420 | file_name: config.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/config.py | content:
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: config
author: Ansible Core Team
version_added: "2.5"
short_description: Display the 'resolved' Ansible option values.
description:
- Retrieves the value of an Ansible configuration setting, resolving all sources, from defaults, ansible.cfg, environment,
CLI, and variables, but not keywords.
- The values returned assume the context of the current host or C(inventory_hostname).
- You can use C(ansible-config list) to see the global available settings, add C(-t all) to also show plugin options.
options:
_terms:
description: The option(s) to look up.
required: True
on_missing:
description: Action to take if term is missing from config
default: error
type: string
choices:
error: Issue an error message and raise fatal signal
warn: Issue a warning message and continue
skip: Silently ignore
plugin_type:
description: The type of the plugin referenced by 'plugin_name' option.
choices: ['become', 'cache', 'callback', 'cliconf', 'connection', 'httpapi', 'inventory', 'lookup', 'netconf', 'shell', 'vars']
type: string
version_added: '2.12'
plugin_name:
description: The name of the plugin for which you want to retrieve configuration settings.
type: string
version_added: '2.12'
show_origin:
description: Set this to return what configuration subsystem the value came from
(defaults, config file, environment, CLI, or variables).
type: bool
version_added: '2.16'
notes:
- Be aware that currently this lookup cannot take keywords nor delegation into account,
so for options that support keywords or are affected by delegation, it is at best a good guess or approximation.
"""
EXAMPLES = """
- name: Show configured default become user
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.config', 'DEFAULT_BECOME_USER')}}"
- name: print out role paths
ansible.builtin.debug:
msg: "These are the configured role paths: {{lookup('ansible.builtin.config', 'DEFAULT_ROLES_PATH')}}"
- name: find retry files, skip if missing that key
ansible.builtin.find:
paths: "{{lookup('ansible.builtin.config', 'RETRY_FILES_SAVE_PATH')|default(playbook_dir, True)}}"
patterns: "*.retry"
- name: see the colors
ansible.builtin.debug: msg="{{item}}"
loop: "{{lookup('ansible.builtin.config', 'COLOR_OK', 'COLOR_CHANGED', 'COLOR_SKIP', wantlist=True)}}"
- name: skip if bad value in var
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.config', config_in_var, on_missing='skip')}}"
var:
config_in_var: UNKNOWN
- name: show remote user and port for ssh connection
ansible.builtin.debug: msg={{q("ansible.builtin.config", "remote_user", "port", plugin_type="connection", plugin_name="ssh", on_missing='skip')}}
- name: show remote_tmp setting for shell (sh) plugin
ansible.builtin.debug: msg={{q("ansible.builtin.config", "remote_tmp", plugin_type="shell", plugin_name="sh")}}
"""
RETURN = """
_raw:
description:
- A list of value(s) of the key(s) in the config if show_origin is false (default)
- Optionally, a list of 2 element lists (value, origin) if show_origin is true
type: raw
"""
import ansible.plugins.loader as plugin_loader
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleLookupError, AnsibleOptionsError
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class MissingSetting(AnsibleOptionsError):
pass
def _get_plugin_config(pname, ptype, config, variables):
try:
# plugin creates settings on load, this is cached so not too expensive to redo
loader = getattr(plugin_loader, '%s_loader' % ptype)
p = loader.get(pname, class_only=True)
if p is None:
raise AnsibleLookupError('Unable to load %s plugin "%s"' % (ptype, pname))
result, origin = C.config.get_config_value_and_origin(config, plugin_type=ptype, plugin_name=p._load_name, variables=variables)
except AnsibleLookupError:
raise
except AnsibleError as e:
msg = to_native(e)
if 'was not defined' in msg:
raise MissingSetting(msg, orig_exc=e)
raise e
return result, origin
def _get_global_config(config):
try:
result = getattr(C, config)
if callable(result):
raise AnsibleLookupError('Invalid setting "%s" attempted' % config)
except AttributeError as e:
raise MissingSetting(to_native(e), orig_exc=e)
return result
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
missing = self.get_option('on_missing')
ptype = self.get_option('plugin_type')
pname = self.get_option('plugin_name')
show_origin = self.get_option('show_origin')
if (ptype or pname) and not (ptype and pname):
raise AnsibleOptionsError('Both plugin_type and plugin_name are required, cannot use one without the other')
if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']:
raise AnsibleOptionsError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing)
ret = []
for term in terms:
if not isinstance(term, string_types):
raise AnsibleOptionsError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
result = Sentinel
origin = None
try:
if pname:
result, origin = _get_plugin_config(pname, ptype, term, variables)
else:
result = _get_global_config(term)
except MissingSetting as e:
if missing == 'error':
raise AnsibleLookupError('Unable to find setting %s' % term, orig_exc=e)
elif missing == 'warn':
self._display.warning('Skipping, did not find setting %s' % term)
elif missing == 'skip':
pass # this is not needed, but added to have all 3 options stated
if result is not Sentinel:
if show_origin:
ret.append((result, origin))
else:
ret.append(result)
return ret
size: 6,907 | language: Python | extension: .py | total_lines: 140 | avg_line_length: 40.828571 | max_line_length: 151 | alphanum_fraction: 0.654149 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
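The (value, origin) pairs returned when show_origin is true reflect Ansible's layered configuration resolution (defaults, then config file, then environment, and so on). A toy sketch of that precedence idea, purely illustrative and not Ansible's actual resolver:

import os

def resolve_setting(name, defaults, ini_values, env_var=None):
    # Toy precedence: defaults < ini file < environment variable.
    value, origin = defaults.get(name), 'default'
    if name in ini_values:
        value, origin = ini_values[name], 'ini'
    if env_var and env_var in os.environ:
        value, origin = os.environ[env_var], 'env'
    return value, origin

# resolve_setting('timeout', {'timeout': 10}, {'timeout': 30}) -> (30, 'ini')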
id: 14,421 | file_name: pipe.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/pipe.py | content:
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
name: pipe
author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
version_added: "0.9"
short_description: read output from a command
description:
- Run a command and return the output.
options:
_terms:
description: command(s) to run.
required: True
notes:
- Like all lookups this runs on the Ansible controller and is unaffected by other keywords, such as become,
so if you need different permissions you must change the command or run Ansible as another user.
- Alternatively you can use a shell/command task that runs against localhost and registers the result.
- Pipe lookup internally invokes Popen with shell=True (this is required and intentional).
This type of invocation is considered a security issue if appropriate care is not taken to sanitize any user provided or variable input.
It is strongly recommended to pass user or variable input through the quote filter before using it with the pipe lookup.
See example section for this.
Read more about this L(Bandit B602 docs,https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html)
- The directory of the play is used as the current working directory.
"""
EXAMPLES = r"""
- name: raw result of running date command
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.pipe', 'date') }}"
- name: Always use quote filter to make sure your variables are safe to use with shell
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.pipe', 'getent passwd ' + myuser | quote ) }}"
"""
RETURN = r"""
_string:
description:
- stdout from command
type: list
elements: str
"""
import subprocess
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
# https://docs.python.org/3/library/subprocess.html#popen-constructor
#
# The shell argument (which defaults to False) specifies whether to use the
# shell as the program to execute. If shell is True, it is recommended to pass
# args as a string rather than as a sequence
#
# https://github.com/ansible/ansible/issues/6550
term = str(term)
p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.append(stdout.decode("utf-8").rstrip())
else:
raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
return ret
size: 3,031 | language: Python | extension: .py | total_lines: 63 | avg_line_length: 41.285714 | max_line_length: 146 | alphanum_fraction: 0.687183 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
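The quoting advice in the notes above translates directly to the standard library: shlex.quote() plays the role of Ansible's quote filter when a shell=True command is built from variable input. A minimal sketch:

import shlex
import subprocess

myuser = 'root'  # imagine this value came from an untrusted variable
cmd = 'getent passwd ' + shlex.quote(myuser)  # quoting defuses shell metacharacters
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
if result.returncode != 0:
    raise RuntimeError('pipe(%s) returned %d' % (cmd, result.returncode))
print(result.stdout.rstrip())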
id: 14,422 | file_name: vars.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/vars.py | content:
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: vars
author: Ansible Core Team
version_added: "2.5"
short_description: Lookup templated value of variables
description:
- 'Retrieves the value of an Ansible variable. Note: Only returns top level variable names.'
options:
_terms:
description: The variable names to look up.
required: True
default:
description:
- What to return if a variable is undefined.
- If no default is set, it will result in an error if any of the variables is undefined.
"""
EXAMPLES = """
- name: Show value of 'variablename'
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar) }}"
vars:
variablename: hello
myvar: ename
- name: Show default empty since I don't have 'variablnotename'
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar, default='')}}"
vars:
variablename: hello
myvar: notename
- name: Produce an error since I don't have 'variablnotename'
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar)}}"
ignore_errors: True
vars:
variablename: hello
myvar: notename
- name: find several related variables
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'ansible_play_hosts', 'ansible_play_batch', 'ansible_play_hosts_all') }}"
- name: Access nested variables
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar).sub_var }}"
ignore_errors: True
vars:
variablename:
sub_var: 12
myvar: ename
- name: alternate way to find some 'prefixed vars' in loop
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'ansible_play_' + item) }}"
loop:
- hosts
- batch
- hosts_all
"""
RETURN = """
_value:
description:
- value of the variables requested.
type: list
elements: raw
"""
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if variables is not None:
self._templar.available_variables = variables
myvars = getattr(self._templar, '_available_variables', {})
self.set_options(var_options=variables, direct=kwargs)
default = self.get_option('default')
ret = []
for term in terms:
if not isinstance(term, string_types):
raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
try:
try:
value = myvars[term]
except KeyError:
try:
value = myvars['hostvars'][myvars['inventory_hostname']][term]
except KeyError:
raise AnsibleUndefinedVariable('No variable found with this name: %s' % term)
ret.append(self._templar.template(value, fail_on_undefined=True))
except AnsibleUndefinedVariable:
if default is not None:
ret.append(default)
else:
raise
return ret
size: 3,424 | language: Python | extension: .py | total_lines: 88 | avg_line_length: 31.693182 | max_line_length: 137 | alphanum_fraction: 0.648689 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
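run() above checks the flat variable namespace first, then falls back to hostvars for the current host before applying the default. That lookup order in isolation, as a sketch (hypothetical helper):

def lookup_var(name, myvars, default=None):
    # Flat namespace first, then hostvars for the current host.
    if name in myvars:
        return myvars[name]
    host = myvars.get('inventory_hostname')
    try:
        return myvars['hostvars'][host][name]
    except (KeyError, TypeError):
        if default is not None:
            return default
        raise KeyError('No variable found with this name: %s' % name)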
id: 14,423 | file_name: unvault.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/unvault.py | content:
# (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: unvault
author: Ansible Core Team
version_added: "2.10"
short_description: read vaulted file(s) contents
description:
- This lookup returns the contents from vaulted (or not) file(s) on the Ansible controller's file system.
options:
_terms:
description: path(s) of files to read
required: True
notes:
- This lookup does not understand 'globbing' nor shell environment variables.
seealso:
- ref: playbook_task_paths
description: Search paths used for relative files.
"""
EXAMPLES = """
- ansible.builtin.debug: msg="the value of foo.txt is {{ lookup('ansible.builtin.unvault', '/etc/foo.txt') | string | trim }}"
"""
RETURN = """
_raw:
description:
- content of file(s) as bytes
type: list
elements: raw
"""
from ansible.errors import AnsibleParserError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common.text.converters import to_text
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
self.set_options(var_options=variables, direct=kwargs)
for term in terms:
display.debug("Unvault lookup term: %s" % term)
# Find the file in the expected search path
lookupfile = self.find_file_in_search_path(variables, 'files', term)
display.vvvv(u"Unvault lookup found %s" % lookupfile)
if lookupfile:
actual_file = self._loader.get_real_file(lookupfile, decrypt=True)
with open(actual_file, 'rb') as f:
b_contents = f.read()
ret.append(to_text(b_contents))
else:
raise AnsibleParserError('Unable to find file matching "%s" ' % term)
return ret
size: 2,049 | language: Python | extension: .py | total_lines: 52 | avg_line_length: 32.5 | max_line_length: 126 | alphanum_fraction: 0.660282 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,424 | file_name: file.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/file.py | content:
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: file
author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
version_added: "0.9"
short_description: read file contents
description:
- This lookup returns the contents from a file on the Ansible controller's file system.
options:
_terms:
description: path(s) of files to read
required: True
rstrip:
description: whether or not to remove whitespace from the end of the looked-up file
type: bool
required: False
default: True
lstrip:
description: whether or not to remove whitespace from the beginning of the looked-up file
type: bool
required: False
default: False
notes:
- if read in variable context, the file can be interpreted as YAML if the content is valid YAML.
- this lookup does not understand 'globbing', use the fileglob lookup instead.
seealso:
- ref: playbook_task_paths
description: Search paths used for relative files.
"""
EXAMPLES = """
- ansible.builtin.debug:
msg: "the value of foo.txt is {{ lookup('ansible.builtin.file', '/etc/foo.txt') }}"
- name: display multiple file contents
ansible.builtin.debug: var=item
with_file:
- "/path/to/foo.txt"
- "bar.txt" # will be looked in files/ dir relative to play or in role
- "/path/to/biz.txt"
"""
RETURN = """
_raw:
description:
- content of file(s)
type: list
elements: str
"""
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleLookupError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common.text.converters import to_text
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
self.set_options(var_options=variables, direct=kwargs)
for term in terms:
display.debug("File lookup term: %s" % term)
# Find the file in the expected search path
try:
lookupfile = self.find_file_in_search_path(variables, 'files', term, ignore_missing=True)
display.vvvv(u"File lookup using %s as file" % lookupfile)
if lookupfile:
b_contents, show_data = self._loader._get_file_contents(lookupfile)
contents = to_text(b_contents, errors='surrogate_or_strict')
if self.get_option('lstrip'):
contents = contents.lstrip()
if self.get_option('rstrip'):
contents = contents.rstrip()
ret.append(contents)
else:
# TODO: only add search info if abs path?
raise AnsibleOptionsError("file not found, use -vvvvv to see paths searched")
except AnsibleError as e:
raise AnsibleLookupError("The 'file' lookup had an issue accessing the file '%s'" % term, orig_exc=e)
return ret
size: 3,271 | language: Python | extension: .py | total_lines: 78 | avg_line_length: 33.653846 | max_line_length: 117 | alphanum_fraction: 0.643823 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,425 | file_name: url.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/url.py | content:
# (c) 2015, Brian Coca <bcoca@ansible.com>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: url
author: Brian Coca (@bcoca)
version_added: "1.9"
short_description: return contents from URL
description:
- Returns the content of the URL requested to be used as data in play.
options:
_terms:
description: urls to query
validate_certs:
description: Flag to control SSL certificate validation
type: boolean
default: True
split_lines:
description: Flag to control if content is returned as a list of lines or as a single text blob
type: boolean
default: True
use_proxy:
description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
type: boolean
default: True
username:
description: Username to use for HTTP authentication.
type: string
version_added: "2.8"
password:
description: Password to use for HTTP authentication.
type: string
version_added: "2.8"
headers:
description: HTTP request headers
type: dictionary
default: {}
version_added: "2.9"
force:
description: Whether or not to set "cache-control" header with value "no-cache"
type: boolean
version_added: "2.10"
default: False
vars:
- name: ansible_lookup_url_force
env:
- name: ANSIBLE_LOOKUP_URL_FORCE
ini:
- section: url_lookup
key: force
timeout:
description: How long to wait for the server to send data before giving up
type: float
version_added: "2.10"
default: 10
vars:
- name: ansible_lookup_url_timeout
env:
- name: ANSIBLE_LOOKUP_URL_TIMEOUT
ini:
- section: url_lookup
key: timeout
http_agent:
description: User-Agent to use in the request. The default was changed in 2.11 to V(ansible-httpget).
type: string
version_added: "2.10"
default: ansible-httpget
vars:
- name: ansible_lookup_url_agent
env:
- name: ANSIBLE_LOOKUP_URL_AGENT
ini:
- section: url_lookup
key: agent
force_basic_auth:
description: Force basic authentication
type: boolean
version_added: "2.10"
default: False
vars:
- name: ansible_lookup_url_force_basic_auth
env:
- name: ANSIBLE_LOOKUP_URL_FORCE_BASIC_AUTH
ini:
- section: url_lookup
key: force_basic_auth
follow_redirects:
description: How redirects are followed; one of urllib2, all/yes, safe, or none
type: string
version_added: "2.10"
default: 'urllib2'
vars:
- name: ansible_lookup_url_follow_redirects
env:
- name: ANSIBLE_LOOKUP_URL_FOLLOW_REDIRECTS
ini:
- section: url_lookup
key: follow_redirects
choices:
all: Will follow all redirects.
none: Will not follow any redirects.
safe: Only redirects doing GET or HEAD requests will be followed.
urllib2: Defer to urllib2 behavior (As of writing this follows HTTP redirects).
'no': (DEPRECATED, removed in 2.22) alias of V(none).
'yes': (DEPRECATED, removed in 2.22) alias of V(all).
use_gssapi:
description:
- Use GSSAPI handler of requests
- As of Ansible 2.11, GSSAPI credentials can be specified with O(username) and O(password).
type: boolean
version_added: "2.10"
default: False
vars:
- name: ansible_lookup_url_use_gssapi
env:
- name: ANSIBLE_LOOKUP_URL_USE_GSSAPI
ini:
- section: url_lookup
key: use_gssapi
use_netrc:
description:
- Determines whether to use credentials from the ``~/.netrc`` file
- By default .netrc is used with Basic authentication headers
- When set to False, .netrc credentials are ignored
type: boolean
version_added: "2.14"
default: True
vars:
- name: ansible_lookup_url_use_netrc
env:
- name: ANSIBLE_LOOKUP_URL_USE_NETRC
ini:
- section: url_lookup
key: use_netrc
unix_socket:
description: String of file system path to unix socket file to use when establishing connection to the provided url
type: string
version_added: "2.10"
vars:
- name: ansible_lookup_url_unix_socket
env:
- name: ANSIBLE_LOOKUP_URL_UNIX_SOCKET
ini:
- section: url_lookup
key: unix_socket
ca_path:
description: String of file system path to CA cert bundle to use
type: string
version_added: "2.10"
vars:
- name: ansible_lookup_url_ca_path
env:
- name: ANSIBLE_LOOKUP_URL_CA_PATH
ini:
- section: url_lookup
key: ca_path
unredirected_headers:
description: A list of headers to not attach on a redirected request
type: list
elements: string
version_added: "2.10"
vars:
- name: ansible_lookup_url_unredir_headers
env:
- name: ANSIBLE_LOOKUP_URL_UNREDIR_HEADERS
ini:
- section: url_lookup
key: unredirected_headers
ciphers:
description:
- SSL/TLS Ciphers to use for the request
- 'When a list is provided, all ciphers are joined in order with C(:)'
- See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
for more details.
- The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions
type: list
elements: string
version_added: '2.14'
vars:
- name: ansible_lookup_url_ciphers
env:
- name: ANSIBLE_LOOKUP_URL_CIPHERS
ini:
- section: url_lookup
key: ciphers
"""
EXAMPLES = """
- name: url lookup splits lines by default
ansible.builtin.debug: msg="{{item}}"
loop: "{{ lookup('ansible.builtin.url', 'https://github.com/gremlin.keys', wantlist=True) }}"
- name: display ip ranges
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.url', 'https://ip-ranges.amazonaws.com/ip-ranges.json', split_lines=False) }}"
- name: url lookup using authentication
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2') }}"
- name: url lookup using basic authentication
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2', force_basic_auth='True') }}"
- name: url lookup using headers
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.url', 'https://some.private.site.com/api/service', headers={'header1':'value1', 'header2':'value2'} ) }}"
"""
RETURN = """
_list:
description: list of list of lines or content of url(s)
type: list
elements: str
"""
from urllib.error import HTTPError, URLError
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
ret = []
for term in terms:
display.vvvv("url lookup connecting to %s" % term)
if self.get_option('follow_redirects') in ('yes', 'no'):
display.deprecated(
"Using 'yes' or 'no' for 'follow_redirects' parameter is deprecated.",
version='2.22'
)
try:
response = open_url(
term, validate_certs=self.get_option('validate_certs'),
use_proxy=self.get_option('use_proxy'),
url_username=self.get_option('username'),
url_password=self.get_option('password'),
headers=self.get_option('headers'),
force=self.get_option('force'),
timeout=self.get_option('timeout'),
http_agent=self.get_option('http_agent'),
force_basic_auth=self.get_option('force_basic_auth'),
follow_redirects=self.get_option('follow_redirects'),
use_gssapi=self.get_option('use_gssapi'),
unix_socket=self.get_option('unix_socket'),
ca_path=self.get_option('ca_path'),
unredirected_headers=self.get_option('unredirected_headers'),
ciphers=self.get_option('ciphers'),
use_netrc=self.get_option('use_netrc')
)
except HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, to_native(e)))
except URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, to_native(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, to_native(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, to_native(e)))
if self.get_option('split_lines'):
for line in response.read().splitlines():
ret.append(to_text(line))
else:
ret.append(to_text(response.read()))
return ret
size: 9,583 | language: Python | extension: .py | total_lines: 259 | avg_line_length: 29.745174 | max_line_length: 149 | alphanum_fraction: 0.645144 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
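Of the many options above, split_lines only decides whether the body comes back whole or one entry per line. A standard-library-only sketch of that shape (no authentication, proxies, or the other options):

from urllib.request import urlopen

def fetch(url, split_lines=True, timeout=10):
    with urlopen(url, timeout=timeout) as resp:
        body = resp.read().decode()
    return body.splitlines() if split_lines else [body]

# fetch('https://example.com', split_lines=False) -> a one-element list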
id: 14,426 | file_name: together.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/together.py | content:
# (c) 2013, Bradley Young <young.bradley@gmail.com>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: together
author: Bradley Young (!UNKNOWN) <young.bradley@gmail.com>
version_added: '1.3'
short_description: merges lists into synchronized list
description:
- Creates a list with the iterated elements of the supplied lists
- "To clarify with an example, [ 'a', 'b' ] and [ 1, 2 ] turn into [ ('a',1), ('b', 2) ]"
- This is basically the same as the 'zip_longest' filter and Python function
- Any 'unbalanced' elements will be substituted with 'None'
options:
_terms:
description: list of lists to merge
required: True
"""
EXAMPLES = """
- name: item.0 returns from the 'a' list, item.1 returns from the '1' list
ansible.builtin.debug:
msg: "{{ item.0 }} and {{ item.1 }}"
with_together:
- ['a', 'b', 'c', 'd']
- [1, 2, 3, 4]
"""
RETURN = """
_list:
description: synchronized list
type: list
elements: list
"""
import itertools
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
"""
Transpose a list of arrays:
[1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
Replace any empty spots in 2nd array with None:
[1, 2], [3] -> [1, 3], [2, None]
"""
def _lookup_variables(self, terms):
results = []
for x in terms:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
raise AnsibleError("with_together requires at least one element in each list")
return [self._flatten(x) for x in itertools.zip_longest(*my_list, fillvalue=None)]
size: 2,110 | language: Python | extension: .py | total_lines: 56 | avg_line_length: 32.482143 | max_line_length: 95 | alphanum_fraction: 0.647088 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
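As the description says, this is essentially itertools.zip_longest plus flattening; the padding behavior is quick to verify interactively:

from itertools import zip_longest

print(list(zip_longest(['a', 'b', 'c', 'd'], [1, 2], fillvalue=None)))
# -> [('a', 1), ('b', 2), ('c', None), ('d', None)]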
id: 14,427 | file_name: sequence.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/sequence.py | content:
# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: sequence
author: Jayson Vantuyl (!UNKNOWN) <jayson@aggressive.ly>
version_added: "1.0"
short_description: generate a list based on a number sequence
description:
- generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
to increment the sequence, and an optional printf-style format string.
- 'Arguments can be specified as key=value pair strings, or in a shortcut form of the arguments string: [start-]end[/stride][:format].'
- 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0600).'
- Starting at version 1.9.2, negative strides are allowed.
- Generated items are strings. Use Jinja2 filters to convert items to preferred type, e.g. C({{ 1 + item|int }}).
- See also Jinja2 C(range) filter as an alternative.
options:
start:
description: number at which to start the sequence
default: 1
type: integer
end:
description: number at which to end the sequence, don't use this with count
type: integer
count:
description: number of elements in the sequence, this is not to be used with end
type: integer
stride:
description: increments between sequence numbers, the default is 1 unless the end is less than the start, then it is -1.
type: integer
default: 1
format:
description: return a string with the generated number formatted in
default: "%d"
"""
EXAMPLES = """
- name: create some test users
ansible.builtin.user:
name: "{{ item }}"
state: present
groups: "evens"
with_sequence: start=0 end=32 format=testuser%02x
- name: create a series of directories with even numbers for some reason
ansible.builtin.file:
dest: "/var/stuff/{{ item }}"
state: directory
with_sequence: start=4 end=16 stride=2
- name: a simpler way to use the sequence plugin create 4 groups
ansible.builtin.group:
name: "group{{ item }}"
state: present
with_sequence: count=4
- name: the final countdown
ansible.builtin.debug:
msg: "{{item}} seconds to detonation"
with_sequence: start=10 end=0 stride=-1
- name: Use of variable
ansible.builtin.debug:
msg: "{{ item }}"
with_sequence: start=1 end="{{ end_at }}"
vars:
- end_at: 10
"""
RETURN = """
_list:
description:
- A list containing generated sequence of items
type: list
elements: str
"""
from re import compile as re_compile, IGNORECASE
from ansible.errors import AnsibleError
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
"^(" + # Group 0
NUM + # Group 1: Start
"-)?" +
NUM + # Group 2: End
"(/" + # Group 3
NUM + # Group 4: Stride
")?" +
"(:(.+))?$", # Group 5, Group 6: Format String
IGNORECASE
)
FIELDS = frozenset(('start', 'end', 'stride', 'count', 'format'))
class LookupModule(LookupBase):
"""
sequence lookup module
Used to generate some sequence of items. Takes arguments in two forms.
The simple / shortcut form is:
[start-]end[/stride][:format]
As indicated by the brackets: start, stride, and format string are all
optional. The format string is in the style of printf. This can be used
to pad with zeros, format in hexadecimal, etc. All of the numerical values
can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
Negative numbers are not supported.
Some examples:
5 -> ["1","2","3","4","5"]
5-8 -> ["5", "6", "7", "8"]
2-10/2 -> ["2", "4", "6", "8", "10"]
4:host%02d -> ["host01","host02","host03","host04"]
The standard Ansible key-value form is accepted as well. For example:
start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
This format takes an alternate form of "end" called "count", which counts
some number from the starting value. For example:
count=5 -> ["1", "2", "3", "4", "5"]
start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
The count option is mostly useful for avoiding off-by-one errors and errors
calculating the number of entries in a sequence when a stride is specified.
"""
def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in FIELDS:
value = args.pop(arg, None)
if value is not None:
self.set_option(arg, value)
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %s"
% list(args.keys())
)
def parse_simple_args(self, term):
"""parse the shortcut forms, return True/False"""
match = SHORTCUT.match(term)
if not match:
return False
dummy, start, end, dummy, stride, dummy, format = match.groups()
for key in FIELDS:
value = locals().get(key, None)
if value is not None:
self.set_option(key, value)
return True
def set_fields(self):
for f in FIELDS:
setattr(self, f, self.get_option(f))
def sanity_check(self):
if self.count is None and self.end is None:
raise AnsibleError("must specify count or end in with_sequence")
elif self.count is not None and self.end is not None:
raise AnsibleError("can't specify both count and end in with_sequence")
elif self.count is not None:
# convert count to end
if self.count != 0:
self.end = self.start + self.count * self.stride - 1
else:
self.start = 0
self.end = 0
self.stride = 0
del self.count
if self.stride > 0 and self.end < self.start:
raise AnsibleError("to count backwards make stride negative")
if self.stride < 0 and self.end > self.start:
raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
if self.stride >= 0:
adjust = 1
else:
adjust = -1
numbers = range(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
formatted = self.format % i
yield formatted
except (ValueError, TypeError):
raise AnsibleError(
"problem formatting %r with %r" % (i, self.format)
)
def run(self, terms, variables, **kwargs):
results = []
for term in terms:
try:
# set defaults/global
self.set_options(direct=kwargs)
try:
if not self.parse_simple_args(term):
self.parse_kv_args(parse_kv(term))
except AnsibleError:
raise
except Exception as e:
raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.set_fields()
self.sanity_check()
if self.stride != 0:
results.extend(self.generate_sequence())
except AnsibleError:
raise
except Exception as e:
raise AnsibleError(
"unknown error generating sequence: %s" % e
)
return results
size: 8,120 | language: Python | extension: .py | total_lines: 199 | avg_line_length: 32.356784 | max_line_length: 157 | alphanum_fraction: 0.604592 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
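The subtle step above is sanity_check() converting count into end (end = start + count * stride - 1). A compact sketch of the whole generation using the plugin's defaults (hypothetical helper; stride must be nonzero):

def sequence(start=1, end=None, stride=1, fmt='%d', count=None):
    if count is not None:
        end = start + count * stride - 1  # same conversion as sanity_check()
    stop = end + (1 if stride >= 0 else -1)  # make 'end' inclusive
    return [fmt % i for i in range(start, stop, stride)]

# sequence(count=5)                                 -> ['1', '2', '3', '4', '5']
# sequence(start=5, end=11, stride=2, fmt='0x%02x') -> ['0x05', '0x07', '0x09', '0x0b']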
id: 14,428 | file_name: __init__.py | file_path: ansible_ansible/lib/ansible/plugins/lookup/__init__.py | content:
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from abc import abstractmethod
from ansible.errors import AnsibleFileNotFound
from ansible.plugins import AnsiblePlugin
from ansible.utils.display import Display
display = Display()
__all__ = ['LookupBase']
class LookupBase(AnsiblePlugin):
def __init__(self, loader=None, templar=None, **kwargs):
super(LookupBase, self).__init__()
self._loader = loader
self._templar = templar
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
def get_basedir(self, variables):
if 'role_path' in variables:
return variables['role_path']
else:
return self._loader.get_basedir()
@staticmethod
def _flatten(terms):
ret = []
for term in terms:
if isinstance(term, (list, tuple)):
ret.extend(term)
else:
ret.append(term)
return ret
@staticmethod
def _combine(a, b):
results = []
for x in a:
for y in b:
results.append(LookupBase._flatten([x, y]))
return results
@staticmethod
def _flatten_hash_to_list(terms):
ret = []
for key in terms:
ret.append({'key': key, 'value': terms[key]})
return ret
@abstractmethod
def run(self, terms, variables=None, **kwargs):
"""
When the playbook specifies a lookup, this method is run. The
arguments to the lookup become the arguments to this method. One
additional keyword argument named ``variables`` is added to the method
call. It contains the variables available to ansible at the time the
lookup is templated. For instance::
"{{ lookup('url', 'https://toshio.fedorapeople.org/one.txt', validate_certs=True) }}"
would end up calling the lookup plugin named url's run method like this::
run(['https://toshio.fedorapeople.org/one.txt'], variables=available_variables, validate_certs=True)
Lookup plugins can be used within playbooks for looping. When this
happens, the first argument is a list containing the terms. Lookup
plugins can also be called from within playbooks to return their
values into a variable or parameter. If the user passes a string in
this case, it is converted into a list.
Errors encountered during execution should be returned by raising
AnsibleError() with a message describing the error.
Any strings returned by this method that could ever contain non-ascii
must be converted into python's unicode type as the strings will be run
through jinja2 which has this requirement. You can use::
from ansible.module_utils.common.text.converters import to_text
result_string = to_text(result_string)
"""
pass
def find_file_in_search_path(self, myvars, subdir, needle, ignore_missing=False):
"""
Return a file (needle) in the task's expected search path.
"""
if 'ansible_search_path' in myvars:
paths = myvars['ansible_search_path']
else:
paths = [self.get_basedir(myvars)]
result = None
try:
result = self._loader.path_dwim_relative_stack(paths, subdir, needle, is_role=bool('role_path' in myvars))
except AnsibleFileNotFound:
if not ignore_missing:
self._display.warning("Unable to find '%s' in expected paths (use -vvvvv to see paths)" % needle)
return result
def _deprecate_inline_kv(self):
# TODO: placeholder to deprecate in a future version, allowing for a long transition period
# self._display.deprecated('Passing inline k=v values embedded in a string to this lookup. Use direct ,k=v, k2=v2 syntax instead.', version='2.18')
pass
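# --- Hedged example (editor's addition, not part of the upstream module) ---
# A minimal sketch of the run() contract documented above: a hypothetical
# lookup that returns every term twice. In a real project this subclass would
# live in its own file under a 'lookup_plugins/' directory next to a playbook.
class _ExampleRepeatLookup(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        ret = []
        for term in terms:
            ret.extend([term, term])  # terms in, flat list out
        return ret

if __name__ == '__main__':
    # Expected output: ['a', 'a', 'b', 'b']
    print(_ExampleRepeatLookup().run(['a', 'b']))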
| 4,680
|
Python
|
.py
| 101
| 38.574257
| 155
| 0.668497
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,429
|
lines.py
|
ansible_ansible/lib/ansible/plugins/lookup/lines.py
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: lines
author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
version_added: "0.9"
short_description: read lines from command
description:
- Run one or more commands and split the output into lines, returning them as a list
options:
_terms:
description: command(s) to run
required: True
notes:
- Like all lookups, this runs on the Ansible controller and is unaffected by other keywords such as 'become'.
If you need to use different permissions, you must change the command or run Ansible as another user.
- Alternatively, you can use a shell/command task that runs against localhost and registers the result.
- The directory of the play is used as the current working directory.
"""
EXAMPLES = """
- name: We could read the file directly, but this shows output from command
ansible.builtin.debug: msg="{{ item }} is an output line from running cat on /etc/motd"
with_lines: cat /etc/motd
- name: More useful example of looping over a command result
ansible.builtin.shell: "/usr/bin/frobnicate {{ item }}"
with_lines:
- "/usr/bin/frobnications_per_host --param {{ inventory_hostname }}"
"""
RETURN = """
_list:
description:
- lines of stdout from command
type: list
elements: str
"""
import subprocess
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common.text.converters import to_text
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.extend([to_text(l) for l in stdout.splitlines()])
else:
raise AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
return ret
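if __name__ == '__main__':
    # Hedged demo (editor's addition): the core of the lookup without the
    # plugin machinery -- run a shell command and split its stdout into text
    # lines. Assumes a POSIX shell; the command itself is illustrative.
    p = subprocess.Popen('echo one; echo two', shell=True, stdout=subprocess.PIPE)
    stdout, _ = p.communicate()
    print([to_text(line) for line in stdout.splitlines()])  # ['one', 'two']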
| 2,254
|
Python
|
.py
| 52
| 37.884615
| 129
| 0.695255
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,430
|
indexed_items.py
|
ansible_ansible/lib/ansible/plugins/lookup/indexed_items.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: indexed_items
author: Michael DeHaan
version_added: "1.3"
short_description: rewrites lists to return 'indexed items'
description:
- use this lookup if you want to loop over an array and also get the numeric index of where you are in the array as you go
- any list given will be transformed, with each resulting element having its position in item.0 and its value in item.1
options:
_terms:
description: list of items
required: True
"""
EXAMPLES = """
- name: indexed loop demo
ansible.builtin.debug:
msg: "at array position {{ item.0 }} there is a value {{ item.1 }}"
with_indexed_items:
- "{{ some_list }}"
"""
RETURN = """
_raw:
description:
- list with each item.0 giving you the position and item.1 the value
type: list
elements: list
"""
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_indexed_items expects a list")
items = self._flatten(terms)
return list(zip(range(len(items)), items))
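if __name__ == '__main__':
    # Hedged demo (editor's addition): the transformation described in the
    # documentation -- terms are flattened one level, then each element is
    # paired with its numeric position.
    print(LookupModule().run([['a', 'b'], ['c']], variables=None))
    # Expected: [(0, 'a'), (1, 'b'), (2, 'c')]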
| 1,532
|
Python
|
.py
| 41
| 32.658537
| 138
| 0.691425
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,431
|
env.py
|
ansible_ansible/lib/ansible/plugins/lookup/env.py
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: env
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "0.9"
short_description: Read the value of environment variables
description:
- Allows you to query the environment variables available on the
controller when you invoked Ansible.
options:
_terms:
description:
- Environment variable or list of them to lookup the values for.
required: True
default:
description: What return when the variable is undefined
type: raw
default: ''
version_added: '2.13'
notes:
- You can pass the C(Undefined) object as O(default) to force an undefined error
"""
EXAMPLES = """
- name: Basic usage
ansible.builtin.debug:
msg: "'{{ lookup('ansible.builtin.env', 'HOME') }}' is the HOME environment variable."
- name: Before 2.13, how to set default value if the variable is not defined
ansible.builtin.debug:
msg: "Hello {{ lookup('ansible.builtin.env', 'UNDEFINED_VARIABLE') | default('World', True) }}"
- name: Example how to set default value if the variable is not defined
ansible.builtin.debug:
msg: "Hello {{ lookup('ansible.builtin.env', 'UNDEFINED_VARIABLE', default='World') }}"
- name: Fail if the variable is not defined by setting default value to 'Undefined'
ansible.builtin.debug:
msg: "Hello {{ lookup('ansible.builtin.env', 'UNDEFINED_VARIABLE', default=Undefined) }}"
- name: Fail if the variable is not defined by setting default value to 'undef()'
ansible.builtin.debug:
msg: "Hello {{ lookup('ansible.builtin.env', 'UNDEFINED_VARIABLE', default=undef()) }}"
"""
RETURN = """
_list:
description:
- Values from the environment variables.
type: list
"""
import os
from jinja2.runtime import Undefined
from ansible.errors import AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
ret = []
d = self.get_option('default')
for term in terms:
var = term.split()[0]
val = os.environ.get(var, d)
if isinstance(val, Undefined):
raise AnsibleUndefinedVariable('The "env" lookup found an undefined variable: %s' % var)
ret.append(val)
return ret
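if __name__ == '__main__':
    # Hedged demo (editor's addition): how passing a jinja2 Undefined as the
    # default turns a missing variable into a hard error, mirroring run().
    # The variable name below is assumed to be unset in the environment.
    val = os.environ.get('SOME_UNSET_DEMO_VAR', Undefined(name='SOME_UNSET_DEMO_VAR'))
    print(isinstance(val, Undefined))  # True -> the lookup would raise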
| 2,621
|
Python
|
.py
| 64
| 35.453125
| 105
| 0.683445
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,432
|
nested.py
|
ansible_ansible/lib/ansible/plugins/lookup/nested.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: nested
version_added: "1.1"
short_description: composes a list with nested elements of other lists
description:
- Takes the input lists and returns a list with elements that are lists composed of the elements of the input lists
options:
_raw:
description:
- a set of lists
required: True
"""
EXAMPLES = """
- name: give users access to multiple databases
community.mysql.mysql_user:
name: "{{ item[0] }}"
priv: "{{ item[1] }}.*:ALL"
append_privs: yes
password: "foo"
with_nested:
- [ 'alice', 'bob' ]
- [ 'clientdb', 'employeedb', 'providerdb' ]
# As with the case of 'with_items' above, you can use previously defined variables:
- name: here, 'users' contains the above list of employees
community.mysql.mysql_user:
name: "{{ item[0] }}"
priv: "{{ item[1] }}.*:ALL"
append_privs: yes
password: "foo"
with_nested:
- "{{ users }}"
- [ 'clientdb', 'employeedb', 'providerdb' ]
"""
RETURN = """
_list:
description:
- A list composed of lists pairing the elements of the input lists
type: list
"""
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
def _lookup_variables(self, terms, variables):
results = []
for x in terms:
try:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, fail_on_undefined=True)
except UndefinedError as e:
raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms, variables)
my_list = terms[:]
my_list.reverse()
result = []
if len(my_list) == 0:
raise AnsibleError("with_nested requires at least one element in the nested list")
result = my_list.pop()
while len(my_list) > 0:
result2 = self._combine(result, my_list.pop())
result = result2
new_result = []
for x in result:
new_result.append(self._flatten(x))
return new_result
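if __name__ == '__main__':
    # Hedged demo (editor's addition): the cartesian pairing that 'nested'
    # performs, shown via the _combine helper inherited from LookupBase.
    print(LookupBase._combine(['alice', 'bob'], ['clientdb', 'employeedb']))
    # Expected: [['alice', 'clientdb'], ['alice', 'employeedb'],
    #            ['bob', 'clientdb'], ['bob', 'employeedb']]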
| 2,649
|
Python
|
.py
| 72
| 30.583333
| 123
| 0.649903
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,433
|
inventory_hostnames.py
|
ansible_ansible/lib/ansible/plugins/lookup/inventory_hostnames.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Steven Dossett <sdossett@panath.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: inventory_hostnames
author:
- Michael DeHaan
- Steven Dossett (!UNKNOWN) <sdossett@panath.com>
version_added: "1.3"
short_description: list of inventory hosts matching a host pattern
description:
- "This lookup understands 'host patterns' as used by the C(hosts:) keyword in plays
and can return a list of matching hosts from inventory"
notes:
- this is only worthwhile for 'hostname patterns'; otherwise it is easier to loop over the group/group_names variables.
"""
EXAMPLES = """
- name: show all the hosts matching the pattern, i.e. all but the group www
ansible.builtin.debug:
msg: "{{ item }}"
with_inventory_hostnames:
- all:!www
"""
RETURN = """
_hostnames:
description: list of hostnames that matched the host pattern in inventory
type: list
"""
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
manager = InventoryManager(self._loader, parse=False)
for group, hosts in variables['groups'].items():
manager.add_group(group)
for host in hosts:
manager.add_host(host, group=group)
try:
return [h.name for h in manager.get_hosts(pattern=terms)]
except AnsibleError:
return []
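if __name__ == '__main__':
    # Hedged demo (editor's addition): feeding an in-memory inventory through
    # the same pattern matching used by run(). The groups mapping and the
    # pattern 'all:!www' are illustrative assumptions.
    from ansible.parsing.dataloader import DataLoader
    manager = InventoryManager(DataLoader(), parse=False)
    for group, hosts in {'www': ['web1'], 'db': ['db1']}.items():
        manager.add_group(group)
        for host in hosts:
            manager.add_host(host, group=group)
    print([h.name for h in manager.get_hosts(pattern='all:!www')])  # expected: ['db1']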
| 1,716
|
Python
|
.py
| 44
| 33.795455
| 119
| 0.698918
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,434
|
varnames.py
|
ansible_ansible/lib/ansible/plugins/lookup/varnames.py
|
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: varnames
author: Ansible Core Team
version_added: "2.8"
short_description: Lookup matching variable names
description:
- Retrieves a list of matching Ansible variable names.
options:
_terms:
description: List of Python regex patterns to search for in variable names.
required: True
"""
EXAMPLES = """
- name: List variables that start with qz_
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '^qz_.+')}}"
vars:
qz_1: hello
qz_2: world
qa_1: "I won't show"
qz_: "I won't show either"
- name: Show all variables
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '.+')}}"
- name: Show variables with 'hosts' in their names
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', 'hosts')}}"
- name: Find several related variables that end specific way
ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '.+_zone$', '.+_location$') }}"
"""
RETURN = """
_value:
description:
- List of the variable names requested.
type: list
"""
import re
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if variables is None:
raise AnsibleError('No variables available to search')
self.set_options(var_options=variables, direct=kwargs)
ret = []
variable_names = list(variables.keys())
for term in terms:
if not isinstance(term, string_types):
raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
try:
name = re.compile(term)
except Exception as e:
raise AnsibleError('Unable to use "%s" as a search parameter: %s' % (term, to_native(e)))
for varname in variable_names:
if name.search(varname):
ret.append(varname)
return ret
| 2,330
|
Python
|
.py
| 59
| 33.389831
| 119
| 0.666075
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,435
|
random_choice.py
|
ansible_ansible/lib/ansible/plugins/lookup/random_choice.py
|
# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: random_choice
author: Michael DeHaan
version_added: "1.1"
short_description: return random element from list
description:
- The 'random_choice' feature can be used to pick something at random. While it's not a load balancer (there are modules for those),
it can somewhat be used as a poor man's load balancer in a MacGyver-like situation.
- At a more basic level, it can be used to add chaos and excitement to otherwise predictable automation environments.
"""
EXAMPLES = """
- name: Magic 8 ball for MUDs
ansible.builtin.debug:
msg: "{{ item }}"
with_random_choice:
- "go through the door"
- "drink from the goblet"
- "press the red button"
- "do nothing"
"""
RETURN = """
_raw:
description:
- random item
type: raw
"""
import secrets
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = terms
if terms:
try:
ret = [secrets.choice(terms)]
except Exception as e:
raise AnsibleError("Unable to choose random term: %s" % to_native(e))
return ret
| 1,543
|
Python
|
.py
| 43
| 30.767442
| 138
| 0.686117
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,436
|
list.py
|
ansible_ansible/lib/ansible/plugins/lookup/list.py
|
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: list
author: Ansible Core Team
version_added: "2.0"
short_description: simply returns what it is given.
description:
- this is mostly a noop, to be used as a with_list loop when you don't want the content transformed in any way.
"""
EXAMPLES = """
- name: unlike with_items you will get 3 items from this loop, the 2nd one being a list
ansible.builtin.debug: var=item
with_list:
- 1
- [2,3]
- 4
"""
RETURN = """
_list:
description: basically the same as you fed in
type: list
elements: raw
"""
from collections.abc import Sequence
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
if not isinstance(terms, Sequence):
raise AnsibleError("with_list expects a list")
return terms
| 1,058
|
Python
|
.py
| 33
| 28.090909
| 116
| 0.707389
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,437
|
ini.py
|
ansible_ansible/lib/ansible/plugins/lookup/ini.py
|
# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: ini
author: Yannig Perre (!UNKNOWN) <yannig.perre(at)gmail.com>
version_added: "2.0"
short_description: read data from an ini file
description:
- "The ini lookup reads the contents of a file in INI format C(key1=value1).
This plugin retrieves the value on the right side after the equal sign C('=') of a given section C([section])."
- "You can also read a property file which - in this case - does not contain section."
options:
_terms:
description: The key(s) to look up.
required: True
type:
description: Type of the file. 'properties' refers to the Java properties files.
default: 'ini'
choices: ['ini', 'properties']
file:
description: Name of the file to load.
default: 'ansible.ini'
section:
default: global
description: Section in which to look up the key.
re:
default: False
type: boolean
description: Flag to indicate if the key supplied is a regexp.
encoding:
default: utf-8
description: Text encoding to use.
default:
description: Return value if the key is not in the ini file.
default: ''
case_sensitive:
description:
Whether key names read from O(file) should be case sensitive. This prevents
duplicate key errors if keys only differ in case.
default: False
version_added: '2.12'
allow_no_value:
description:
- Read an ini file which contains key without value and without '=' symbol.
type: bool
default: False
aliases: ['allow_none']
version_added: '2.12'
interpolation:
description:
Allows for interpolation of values, see https://docs.python.org/3/library/configparser.html#configparser.BasicInterpolation
type: bool
default: True
version_added: '2.18'
seealso:
- ref: playbook_task_paths
description: Search paths used for relative files.
"""
EXAMPLES = """
- ansible.builtin.debug: msg="User in integration is {{ lookup('ansible.builtin.ini', 'user', section='integration', file='users.ini') }}"
- ansible.builtin.debug: msg="User in production is {{ lookup('ansible.builtin.ini', 'user', section='production', file='users.ini') }}"
- ansible.builtin.debug: msg="user.name is {{ lookup('ansible.builtin.ini', 'user.name', type='properties', file='user.properties') }}"
- ansible.builtin.debug:
msg: "{{ item }}"
loop: "{{ q('ansible.builtin.ini', '.*', section='section1', file='test.ini', re=True) }}"
- name: Read an ini file with allow_no_value
ansible.builtin.debug:
msg: "{{ lookup('ansible.builtin.ini', 'user', file='mysql.ini', section='mysqld', allow_no_value=True) }}"
"""
RETURN = """
_raw:
description:
- value(s) of the key(s) in the ini file
type: list
elements: str
"""
import configparser
import os
import re
from io import StringIO
from collections import defaultdict
from collections.abc import MutableSequence
from ansible.errors import AnsibleLookupError, AnsibleOptionsError
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.plugins.lookup import LookupBase
def _parse_params(term, paramvals):
"""Safely split parameter term to preserve spaces"""
# TODO: deprecate this method
valid_keys = paramvals.keys()
params = defaultdict(lambda: '')
# TODO: check kv_parser to see if it can handle spaces this same way
keys = []
thiskey = 'key' # initialize for 'lookup item'
for idp, phrase in enumerate(term.split()):
# update current key if used
if '=' in phrase:
for k in valid_keys:
if ('%s=' % k) in phrase:
thiskey = k
# if first term or key does not exist
if idp == 0 or not params[thiskey]:
params[thiskey] = phrase
keys.append(thiskey)
else:
# append to existing key
params[thiskey] += ' ' + phrase
# return list of values
return [params[x] for x in keys]
class LookupModule(LookupBase):
def get_value(self, key, section, dflt, is_regexp):
# Retrieve all values from a section using a regexp
if is_regexp:
return [v for k, v in self.cp.items(section) if re.match(key, k)]
value = None
# Retrieve a single value
try:
value = self.cp.get(section, key)
except configparser.NoOptionError:
return dflt
return value
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
paramvals = self.get_options()
self.cp = configparser.ConfigParser(
allow_no_value=paramvals.get('allow_no_value', paramvals.get('allow_none')),
interpolation=configparser.BasicInterpolation() if paramvals.get('interpolation') else None,
)
if paramvals['case_sensitive']:
self.cp.optionxform = to_native
ret = []
for term in terms:
key = term
# parameters specified?
if '=' in term or ' ' in term.strip():
self._deprecate_inline_kv()
params = _parse_params(term, paramvals)
try:
updated_key = False
updated_options = False
for param in params:
if '=' in param:
name, value = param.split('=')
if name not in paramvals:
raise AnsibleLookupError('%s is not a valid option.' % name)
self.set_option(name, value)
updated_options = True
elif key == term:
# only take first, this format never supported multiple keys inline
key = param
updated_key = True
if updated_options:
paramvals = self.get_options()
except ValueError as e:
# bad params passed
raise AnsibleLookupError("Could not use '%s' from '%s': %s" % (param, params, to_native(e)), orig_exc=e)
if not updated_key:
raise AnsibleOptionsError("No key to lookup was provided as first term with in string inline options: %s" % term)
# only passed options in inline string
# TODO: look to use cache to avoid redoing this for every term if they use same file
# Retrieve file path
path = self.find_file_in_search_path(variables, 'files', paramvals['file'])
# Create StringIO later used to parse ini
config = StringIO()
# Special case for java properties
if paramvals['type'] == "properties":
config.write(u'[java_properties]\n')
paramvals['section'] = 'java_properties'
# Open file using encoding
contents, show_data = self._loader._get_file_contents(path)
contents = to_text(contents, errors='surrogate_or_strict', encoding=paramvals['encoding'])
config.write(contents)
config.seek(0, os.SEEK_SET)
try:
self.cp.read_file(config)
except configparser.DuplicateOptionError as doe:
raise AnsibleLookupError("Duplicate option in '{file}': {error}".format(file=paramvals['file'], error=to_native(doe)))
try:
var = self.get_value(key, paramvals['section'], paramvals['default'], paramvals['re'])
except configparser.NoSectionError:
raise AnsibleLookupError("No section '{section}' in {file}".format(section=paramvals['section'], file=paramvals['file']))
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
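if __name__ == '__main__':
    # Hedged demo (editor's addition): how _parse_params splits an inline
    # term while preserving spaces inside values. The option names mirror the
    # plugin's real options; the term itself is illustrative.
    opts = {'file': None, 'section': None, 're': None}
    print(_parse_params('user section=global file=my settings.ini', opts))
    # Expected: ['user', 'section=global', 'file=my settings.ini']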
| 8,421
|
Python
|
.py
| 189
| 34.116402
| 138
| 0.600829
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,438
|
items.py
|
ansible_ansible/lib/ansible/plugins/lookup/items.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: items
author: Michael DeHaan
version_added: historical
short_description: list of items
description:
- this lookup returns a list of items given to it; if any of the top-level items is also a list it will flatten it, but it will not recurse
notes:
- this is the standard lookup used for loops in most examples
- check out the 'flattened' lookup for recursive flattening
- if you do not want flattening nor any other transformation look at the 'list' lookup.
options:
_terms:
description: list of items
required: True
"""
EXAMPLES = """
- name: "loop through list"
ansible.builtin.debug:
msg: "An item: {{ item }}"
with_items:
- 1
- 2
- 3
- name: add several users
ansible.builtin.user:
name: "{{ item }}"
groups: "wheel"
state: present
with_items:
- testuser1
- testuser2
- name: "loop through list from a variable"
ansible.builtin.debug:
msg: "An item: {{ item }}"
with_items: "{{ somelist }}"
- name: more complex items to add several users
ansible.builtin.user:
name: "{{ item.name }}"
uid: "{{ item.uid }}"
groups: "{{ item.groups }}"
state: present
with_items:
- { name: testuser1, uid: 1002, groups: "wheel, staff" }
- { name: testuser2, uid: 1003, groups: staff }
"""
RETURN = """
_raw:
description:
- once flattened list
type: list
"""
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
return self._flatten(terms)
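if __name__ == '__main__':
    # Hedged demo (editor's addition): one-level flattening as documented --
    # nested lists at the top level are expanded, but not recursively.
    print(LookupModule._flatten([1, [2, 3], [4, [5, 6]]]))
    # Expected: [1, 2, 3, 4, [5, 6]]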
| 1,815
|
Python
|
.py
| 60
| 25.95
| 145
| 0.664945
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,439
|
subelements.py
|
ansible_ansible/lib/ansible/plugins/lookup/subelements.py
|
# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: subelements
author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
version_added: "1.4"
short_description: traverse nested key from a list of dictionaries
description:
- Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those records.
options:
_terms:
description: tuple of list of dictionaries and dictionary key to extract
required: True
skip_missing:
default: False
description:
- The lookup accepts this optional flag in a dictionary passed as the third term. See the Examples section for more information.
- If set to V(True), the lookup plugin will skip the list items that do not contain the given subkey.
- If set to V(False), the plugin will raise an error and complain about the missing subkey.
"""
EXAMPLES = """
- name: show var structure as it is needed for example to make sense
hosts: all
vars:
users:
- name: alice
authorized:
- /tmp/alice/onekey.pub
- /tmp/alice/twokey.pub
mysql:
password: mysql-password
hosts:
- "%"
- "127.0.0.1"
- "::1"
- "localhost"
privs:
- "*.*:SELECT"
- "DB1.*:ALL"
groups:
- wheel
- name: bob
authorized:
- /tmp/bob/id_rsa.pub
mysql:
password: other-mysql-password
hosts:
- "db1"
privs:
- "*.*:SELECT"
- "DB2.*:ALL"
tasks:
- name: Set authorized ssh key, extracting just that data from 'users'
ansible.posix.authorized_key:
user: "{{ item.0.name }}"
key: "{{ lookup('file', item.1) }}"
with_subelements:
- "{{ users }}"
- authorized
- name: Setup MySQL users, given the mysql hosts and privs subkey lists
community.mysql.mysql_user:
name: "{{ item.0.name }}"
password: "{{ item.0.mysql.password }}"
host: "{{ item.1 }}"
priv: "{{ item.0.mysql.privs | join('/') }}"
with_subelements:
- "{{ users }}"
- mysql.hosts
- name: list groups for users that have them, don't error if groups key is missing
ansible.builtin.debug: var=item
loop: "{{ q('ansible.builtin.subelements', users, 'groups', {'skip_missing': True}) }}"
"""
RETURN = """
_list:
description: list of subelements extracted
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
def _raise_terms_error(msg=""):
raise AnsibleError(
"subelements lookup expects a list of two or three items, " + msg)
terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar)
# check lookup terms - check number of terms
if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
_raise_terms_error()
# first term should be a list (or dict), second a string holding the subkey
if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
_raise_terms_error("first a dict or a list, second a string pointing to the subkey")
subelements = terms[1].split(".")
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped', False) is not False:
# the registered result was completely skipped
return []
elementlist = []
for key in terms[0]:
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
# check for optional flags in third term
flags = {}
if len(terms) == 3:
flags = terms[2]
if not isinstance(flags, dict) and not all(isinstance(key, string_types) and key in FLAGS for key in flags):
_raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
# build_items
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
if item0.get('skipped', False) is not False:
# this particular item is to be skipped
continue
skip_missing = boolean(flags.get('skip_missing', False), strict=False)
subvalue = item0
lastsubkey = False
sublist = []
for subkey in subelements:
if subkey == subelements[-1]:
lastsubkey = True
if subkey not in subvalue:
if skip_missing:
continue
else:
raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
if not lastsubkey:
if not isinstance(subvalue[subkey], dict):
if skip_missing:
continue
else:
raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
else:
subvalue = subvalue[subkey]
else: # lastsubkey
if not isinstance(subvalue[subkey], list):
raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
else:
sublist = subvalue.pop(subkey, [])
for item1 in sublist:
ret.append((item0, item1))
return ret
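if __name__ == '__main__':
    # Hedged demo (editor's addition): a stripped-down rendition of the
    # traversal implemented in run() above, without templating, flags, or
    # error handling. The data and the dotted key are illustrative only.
    def walk(records, dotted_key):
        pairs = []
        for record in records:
            value = record
            for subkey in dotted_key.split('.'):
                value = value[subkey]  # run() raises AnsibleError here instead
            for element in value:
                pairs.append((record, element))
        return pairs
    users = [
        {'name': 'alice', 'mysql': {'hosts': ['%', 'localhost']}},
        {'name': 'bob', 'mysql': {'hosts': ['db1']}},
    ]
    for user, host in walk(users, 'mysql.hosts'):
        print(user['name'], host)
    # Expected: alice % / alice localhost / bob db1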
| 6,272
|
Python
|
.py
| 148
| 31.527027
| 142
| 0.574705
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,440
|
__init__.py
|
ansible_ansible/lib/ansible/plugins/terminal/__init__.py
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
import re
from abc import ABC, abstractmethod
from ansible.errors import AnsibleConnectionFailure
class TerminalBase(ABC):
"""
A base class for implementing cli connections
.. note:: Unlike most of Ansible, nearly all strings in
:class:`TerminalBase` plugins are byte strings. This is because of
how close to the underlying platform these plugins operate. Remember
to mark literal strings as byte strings (``b"string"``) and to use
:func:`~ansible.module_utils.common.text.converters.to_bytes` and
:func:`~ansible.module_utils.common.text.converters.to_text` to avoid unexpected
problems.
"""
#: compiled bytes regular expressions as stdout
terminal_stdout_re = [] # type: list[re.Pattern]
#: compiled bytes regular expressions as stderr
terminal_stderr_re = [] # type: list[re.Pattern]
#: compiled bytes regular expressions to remove ANSI codes
ansi_re = [
re.compile(br'\x1b\[\?1h\x1b='), # CSI ? 1 h ESC =
re.compile(br'\x08.'), # [Backspace] .
re.compile(br"\x1b\[m"), # ANSI reset code
]
#: terminal initial prompt
terminal_initial_prompt = None
#: terminal initial answer
terminal_initial_answer = None
#: Send newline after prompt match
terminal_inital_prompt_newline = True
def __init__(self, connection):
self._connection = connection
def _exec_cli_command(self, cmd, check_rc=True):
"""
Executes the CLI command on the remote device and returns the output
:arg cmd: Byte string command to be executed
"""
return self._connection.exec_command(cmd)
def _get_prompt(self):
"""
Returns the current prompt from the device
:returns: A byte string of the prompt
"""
return self._connection.get_prompt()
def on_open_shell(self):
"""Called after the SSH session is established
This method is called right after the invoke_shell() is called from
the Paramiko SSHClient instance. It provides an opportunity to set up
terminal parameters, such as disabling paging.
"""
pass
def on_close_shell(self):
"""Called before the connection is closed
This method gets called once the connection close has been requested
but before the connection is actually closed. It provides an
opportunity to clean up any terminal resources before the shell is
actually closed
"""
pass
def on_become(self, passwd=None):
"""Called when privilege escalation is requested
:kwarg passwd: String containing the password
This method is called when privilege escalation is requested in the
play context by setting become to True. It is the responsibility of the
terminal plugin to actually perform the privilege escalation, such as
entering `enable` mode.
"""
pass
def on_unbecome(self):
"""Called when privilege deescalation is requested
This method is called when the privilege changes from escalated
(become=True) to non-escalated (become=False). It is the responsibility
of this method to actually perform the deauthorization procedure.
"""
pass
def on_authorize(self, passwd=None):
"""Deprecated method for privilege escalation
:kwarg passwd: String containing the password
"""
return self.on_become(passwd)
def on_deauthorize(self):
"""Deprecated method for privilege deescalation
"""
return self.on_unbecome()
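# --- Hedged example (editor's addition, not part of the upstream module) ---
# A minimal terminal plugin sketch for a hypothetical network OS, following
# the byte-string conventions documented above. The prompt/error patterns and
# the 'terminal length 0' no-paging command are assumptions for illustration.
class _ExampleTerminal(TerminalBase):
    terminal_stdout_re = [
        re.compile(br'[\r\n]?[\w+\-.:/\[\]]+(?:\([^)]+\))?[>#] ?$'),
    ]
    terminal_stderr_re = [
        re.compile(br'% ?Error'),
        re.compile(br'% ?Bad secret'),
    ]
    def on_open_shell(self):
        # Disable paging so multi-page output is not truncated at a prompt.
        try:
            self._exec_cli_command(b'terminal length 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')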
| 4,420
|
Python
|
.py
| 103
| 36.427184
| 88
| 0.687966
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,441
|
tree.py
|
ansible_ansible/lib/ansible/plugins/callback/tree.py
|
# (c) 2012-2014, Ansible, Inc
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: tree
type: notification
requirements:
- invoked in the command line
short_description: Save host events to files
version_added: "2.0"
options:
directory:
version_added: '2.11'
description: directory that will contain the per host JSON files. Also set by the C(--tree) option when using adhoc.
ini:
- section: callback_tree
key: directory
env:
- name: ANSIBLE_CALLBACK_TREE_DIR
default: "~/.ansible/tree"
type: path
description:
- "This callback is used by the Ansible (adhoc) command line option C(-t|--tree)."
- This produces a JSON dump of events in a directory, with a file for each host; the directory used MUST be passed as a command line option.
"""
import os
from ansible.constants import TREE_DIR
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.callback import CallbackBase
from ansible.utils.path import makedirs_safe, unfrackpath
class CallbackModule(CallbackBase):
"""
This callback puts results into a host specific file in a directory in json format.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'tree'
CALLBACK_NEEDS_ENABLED = True
def set_options(self, task_keys=None, var_options=None, direct=None):
""" override to set self.tree """
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
if TREE_DIR:
# TREE_DIR comes from the CLI option --tree, only available for adhoc
self.tree = unfrackpath(TREE_DIR)
else:
self.tree = self.get_option('directory')
def write_tree_file(self, hostname, buf):
""" write something into treedir/hostname """
buf = to_bytes(buf)
try:
makedirs_safe(self.tree)
except (OSError, IOError) as e:
self._display.warning(u"Unable to access or create the configured directory (%s): %s" % (to_text(self.tree), to_text(e)))
try:
path = to_bytes(os.path.join(self.tree, hostname))
with open(path, 'wb+') as fd:
fd.write(buf)
except (OSError, IOError) as e:
self._display.warning(u"Unable to write to %s's file: %s" % (hostname, to_text(e)))
def result_to_tree(self, result):
self.write_tree_file(result._host.get_name(), self._dump_results(result._result))
def v2_runner_on_ok(self, result):
self.result_to_tree(result)
def v2_runner_on_failed(self, result, ignore_errors=False):
self.result_to_tree(result)
def v2_runner_on_unreachable(self, result):
self.result_to_tree(result)
| 3,014
|
Python
|
.py
| 68
| 36.367647
| 143
| 0.64971
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,442
|
minimal.py
|
ansible_ansible/lib/ansible/plugins/callback/minimal.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: minimal
type: stdout
short_description: minimal Ansible screen output
version_added: historical
description:
- This is the default output callback used by the ansible command (ad-hoc)
extends_documentation_fragment:
- result_format_callback
"""
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
"""
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'minimal'
def _command_generic_msg(self, host, result, caption):
""" output the result of a command run """
buf = "%s | %s | rc=%s >>\n" % (host, caption, result.get('rc', -1))
buf += result.get('stdout', '')
buf += result.get('stderr', '')
buf += result.get('msg', '')
return buf + "\n"
def v2_runner_on_failed(self, result, ignore_errors=False):
self._handle_exception(result._result)
self._handle_warnings(result._result)
if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR)
else:
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)
def v2_runner_on_ok(self, result):
self._clean_results(result._result, result._task.action)
self._handle_warnings(result._result)
if result._result.get('changed', False):
color = C.COLOR_CHANGED
state = 'CHANGED'
else:
color = C.COLOR_OK
state = 'SUCCESS'
if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
else:
self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=4)), color=color)
def v2_runner_on_skipped(self, result):
self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)
def v2_on_file_diff(self, result):
if 'diff' in result._result and result._result['diff']:
self._display.display(self._get_diff(result._result['diff']))
| 3,008
|
Python
|
.py
| 58
| 44.482759
| 157
| 0.651877
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,443
|
oneline.py
|
ansible_ansible/lib/ansible/plugins/callback/oneline.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: oneline
type: stdout
short_description: oneline Ansible screen output
version_added: historical
description:
- This is the output callback used by the C(-o)/C(--one-line) command line option.
"""
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
"""
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'oneline'
def _command_generic_msg(self, hostname, result, caption):
stdout = result.get('stdout', '').replace('\n', '\\n').replace('\r', '\\r')
if 'stderr' in result and result['stderr']:
stderr = result.get('stderr', '').replace('\n', '\\n').replace('\r', '\\r')
return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc', -1), stdout, stderr)
else:
return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc', -1), stdout)
def v2_runner_on_failed(self, result, ignore_errors=False):
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n', '')
if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, 'FAILED'), color=C.COLOR_ERROR)
else:
self._display.display(msg, color=C.COLOR_ERROR)
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n', '')),
color=C.COLOR_ERROR)
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
color = C.COLOR_CHANGED
state = 'CHANGED'
else:
color = C.COLOR_OK
state = 'SUCCESS'
if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
else:
self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=0).replace('\n', '')),
color=color)
def v2_runner_on_unreachable(self, result):
self._display.display("%s | UNREACHABLE!: %s" % (result._host.get_name(), result._result.get('msg', '')), color=C.COLOR_UNREACHABLE)
def v2_runner_on_skipped(self, result):
self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
| 3,450
|
Python
|
.py
| 59
| 49.067797
| 149
| 0.617185
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,444
|
default.py
|
ansible_ansible/lib/ansible/plugins/callback/default.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: default
type: stdout
short_description: default Ansible screen output
version_added: historical
description:
- This is the default output callback for ansible-playbook.
extends_documentation_fragment:
- default_callback
- result_format_callback
requirements:
- set as stdout in configuration
"""
from ansible import constants as C
from ansible import context
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from ansible.utils.fqcn import add_internal_fqcns
class CallbackModule(CallbackBase):
"""
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self):
self._play = None
self._last_task_banner = None
self._last_task_name = None
self._task_type_cache = {}
super(CallbackModule, self).__init__()
def v2_runner_on_failed(self, result, ignore_errors=False):
host_label = self.host_label(result)
self._clean_results(result._result, result._task.action)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if self._display.verbosity < 2 and self.get_option('show_task_path_on_failure'):
self._print_task_path(result._task)
msg = "fatal: [%s]: FAILED! => %s" % (host_label, self._dump_results(result._result))
self._display.display(msg, color=C.COLOR_ERROR, stderr=self.get_option('display_failed_stderr'))
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
host_label = self.host_label(result)
if isinstance(result._task, TaskInclude):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
return
elif result._result.get('changed', False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = "changed: [%s]" % (host_label,)
color = C.COLOR_CHANGED
else:
if not self.get_option('display_ok_hosts'):
return
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = "ok: [%s]" % (host_label,)
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result):
if self.get_option('display_skipped_hosts'):
self._clean_results(result._result, result._task.action)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop is not None and 'results' in result._result:
self._process_items(result)
msg = "skipping: [%s]" % result._host.get_name()
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
host_label = self.host_label(result)
msg = "fatal: [%s]: UNREACHABLE! => %s" % (host_label, self._dump_results(result._result))
self._display.display(msg, color=C.COLOR_UNREACHABLE, stderr=self.get_option('display_failed_stderr'))
if result._task.ignore_unreachable:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
self._task_start(task, prefix='TASK')
def _task_start(self, task, prefix=None):
# Cache output prefix for task if provided
# This is needed to properly display 'RUNNING HANDLER' and similar
# when hiding skipped/ok task results
if prefix is not None:
self._task_type_cache[task._uuid] = prefix
# Preserve task name, as all vars may not be available for templating
# when we need it later
if self._play.strategy in add_internal_fqcns(('free', 'host_pinned')):
# Explicitly set to None for strategy free/host_pinned to account for any cached
# task title from a previous non-free play
self._last_task_name = None
else:
self._last_task_name = task.get_name().strip()
# Display the task banner immediately if we're not doing any filtering based on task result
if self.get_option('display_skipped_hosts') and self.get_option('display_ok_hosts'):
self._print_task_banner(task)
def _print_task_banner(self, task):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log, but not
# the argument spec, because that is only evaluated on the target
# machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
prefix = self._task_type_cache.get(task._uuid, 'TASK')
# Use cached task name
task_name = self._last_task_name
if task_name is None:
task_name = task.get_name().strip()
if task.check_mode and self.get_option('check_mode_markers'):
checkmsg = " [CHECK MODE]"
else:
checkmsg = ""
self._display.banner(u"%s [%s%s]%s" % (prefix, task_name, args, checkmsg))
if self._display.verbosity >= 2:
self._print_task_path(task)
self._last_task_banner = task._uuid
def v2_playbook_on_cleanup_task_start(self, task):
self._task_start(task, prefix='CLEANUP TASK')
def v2_playbook_on_handler_task_start(self, task):
self._task_start(task, prefix='RUNNING HANDLER')
def v2_runner_on_start(self, host, task):
if self.get_option('show_per_host_start'):
self._display.display(" [started %s on %s]" % (task, host), color=C.COLOR_OK)
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if play.check_mode and self.get_option('check_mode_markers'):
checkmsg = " [CHECK MODE]"
else:
checkmsg = ""
if not name:
msg = u"PLAY%s" % checkmsg
else:
msg = u"PLAY [%s]%s" % (name, checkmsg)
self._play = play
self._display.banner(msg)
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
for res in result._result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._display.display(diff)
elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
diff = self._get_diff(result._result['diff'])
if diff:
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._display.display(diff)
def v2_runner_item_on_ok(self, result):
host_label = self.host_label(result)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = 'changed'
color = C.COLOR_CHANGED
else:
if not self.get_option('display_ok_hosts'):
return
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = 'ok'
color = C.COLOR_OK
msg = "%s: [%s] => (item=%s)" % (msg, host_label, self._get_item_label(result._result))
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
def v2_runner_item_on_failed(self, result):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
host_label = self.host_label(result)
self._clean_results(result._result, result._task.action)
self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
msg = "failed: [%s]" % (host_label,)
self._handle_warnings(result._result)
self._display.display(
msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)),
color=C.COLOR_ERROR,
stderr=self.get_option('display_failed_stderr')
)
def v2_runner_item_on_skipped(self, result):
if self.get_option('display_skipped_hosts'):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._clean_results(result._result, result._task.action)
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item_label(result._result))
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
label = self._get_item_label(included_file._vars)
if label:
msg += " => (item=%s)" % label
self._display.display(msg, color=C.COLOR_INCLUDED)
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(
u"%s : %s %s %s %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR),
colorize(u'skipped', t['skipped'], C.COLOR_SKIP),
colorize(u'rescued', t['rescued'], C.COLOR_OK),
colorize(u'ignored', t['ignored'], C.COLOR_WARN),
),
screen_only=True
)
self._display.display(
u"%s : %s %s %s %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'ok', t['ok'], None),
colorize(u'changed', t['changed'], None),
colorize(u'unreachable', t['unreachable'], None),
colorize(u'failed', t['failures'], None),
colorize(u'skipped', t['skipped'], None),
colorize(u'rescued', t['rescued'], None),
colorize(u'ignored', t['ignored'], None),
),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats if required
if stats.custom and self.get_option('show_custom_stats'):
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
self._display.banner("DRY RUN")
def v2_playbook_on_start(self, playbook):
if self._display.verbosity > 1:
from os.path import basename
self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
# show CLI arguments
if self._display.verbosity > 3:
if context.CLIARGS.get('args'):
self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
color=C.COLOR_VERBOSE, screen_only=True)
for argument in (a for a in context.CLIARGS if a != 'args'):
val = context.CLIARGS[argument]
if val:
self._display.display('%s: %s' % (argument, val), color=C.COLOR_VERBOSE, screen_only=True)
if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
self._display.banner("DRY RUN")
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
host_label = self.host_label(result)
msg = "FAILED - RETRYING: [%s]: %s (%d retries left)." % (host_label, task_name, result._result['retries'] - result._result['attempts'])
if self._run_is_verbose(result, verbosity=2):
msg += "Result was: %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_DEBUG)
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
started = result._result.get('started')
finished = result._result.get('finished')
self._display.display(
'ASYNC POLL on %s: jid=%s started=%s finished=%s' % (host, jid, started, finished),
color=C.COLOR_DEBUG
)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self._display.display("ASYNC OK on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
# Attempt to get the async job ID. If the job does not finish before the
# async timeout value, the ID may be within the unparsed 'async_result' dict.
jid = result._result.get('ansible_job_id')
if not jid and 'async_result' in result._result:
jid = result._result['async_result'].get('ansible_job_id')
self._display.display("ASYNC FAILED on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
def v2_playbook_on_notify(self, handler, host):
if self._display.verbosity > 1:
self._display.display("NOTIFIED HANDLER %s for %s" % (handler.get_name(), host), color=C.COLOR_VERBOSE, screen_only=True)
| 16,883 | Python | .py | 324 | 40.774691 | 144 | 0.589621 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,445 | junit.py | ansible_ansible/lib/ansible/plugins/callback/junit.py |
# (c) 2016 Matt Clay <matt@mystile.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: junit
type: aggregate
short_description: write playbook output to a JUnit file.
version_added: historical
description:
- This callback writes playbook output to a JUnit formatted XML file.
- "Tasks show up in the report as follows:
'ok': pass
'failed' with 'EXPECTED FAILURE' in the task name: pass
'failed' with 'TOGGLE RESULT' in the task name: pass
'ok' with 'TOGGLE RESULT' in the task name: failure
'failed' due to an exception: error
'failed' for other reasons: failure
'skipped': skipped"
options:
output_dir:
name: JUnit output dir
default: ~/.ansible.log
description: Directory to write XML files to.
env:
- name: JUNIT_OUTPUT_DIR
task_class:
name: JUnit Task class
default: False
description: Configure the output to be one class per yaml file
env:
- name: JUNIT_TASK_CLASS
task_relative_path:
name: JUnit Task relative path
default: none
description: Configure the output to use relative paths to given directory
version_added: "2.8"
env:
- name: JUNIT_TASK_RELATIVE_PATH
replace_out_of_tree_path:
name: Replace out of tree path
default: none
description: Replace the directory portion of an out-of-tree relative task path with the given placeholder
version_added: "2.12.3"
env:
- name: JUNIT_REPLACE_OUT_OF_TREE_PATH
fail_on_change:
name: JUnit fail on change
default: False
description: Consider any tasks reporting "changed" as a junit test failure
env:
- name: JUNIT_FAIL_ON_CHANGE
fail_on_ignore:
name: JUnit fail on ignore
default: False
description: Consider failed tasks as a junit test failure even if ignore_errors is set
env:
- name: JUNIT_FAIL_ON_IGNORE
include_setup_tasks_in_report:
name: JUnit include setup tasks in report
default: True
description: Should the setup tasks be included in the final report
env:
- name: JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT
hide_task_arguments:
name: Hide the arguments for a task
default: False
description: Hide the arguments for a task
version_added: "2.8"
env:
- name: JUNIT_HIDE_TASK_ARGUMENTS
test_case_prefix:
name: Prefix to find actual test cases
default: <empty>
description: Consider a task a test case only if its name has this value as a prefix. Failing tasks are additionally recorded as failed test cases.
version_added: "2.8"
env:
- name: JUNIT_TEST_CASE_PREFIX
requirements:
- enable in configuration
"""
import os
import time
import re
from ansible import constants as C
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.callback import CallbackBase
from ansible.utils._junit_xml import (
TestCase,
TestError,
TestFailure,
TestSuite,
TestSuites,
)
class CallbackModule(CallbackBase):
"""
This callback writes playbook output to a JUnit formatted XML file.
Tasks show up in the report as follows:
'ok': pass
'failed' with 'EXPECTED FAILURE' in the task name: pass
'failed' with 'TOGGLE RESULT' in the task name: pass
'ok' with 'TOGGLE RESULT' in the task name: failure
'failed' due to an exception: error
'failed' for other reasons: failure
'skipped': skipped
This plugin makes use of the following environment variables:
JUNIT_OUTPUT_DIR (optional): Directory to write XML files to.
Default: ~/.ansible.log
JUNIT_TASK_CLASS (optional): Configure the output to be one class per yaml file
Default: False
JUNIT_TASK_RELATIVE_PATH (optional): Configure the output to use relative paths to given directory
Default: none
JUNIT_FAIL_ON_CHANGE (optional): Consider any tasks reporting "changed" as a junit test failure
Default: False
JUNIT_FAIL_ON_IGNORE (optional): Consider failed tasks as a junit test failure even if ignore_errors is set
Default: False
JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT (optional): Should the setup tasks be included in the final report
Default: True
JUNIT_HIDE_TASK_ARGUMENTS (optional): Hide the arguments for a task
Default: False
JUNIT_TEST_CASE_PREFIX (optional): Consider a task a test case only if its name has this value as a prefix. Additionally, failing tasks
are recorded as failed test cases.
Default: <empty>
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'junit'
CALLBACK_NEEDS_ENABLED = True
def __init__(self):
super(CallbackModule, self).__init__()
self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
self._task_class = os.getenv('JUNIT_TASK_CLASS', 'False').lower()
self._task_relative_path = os.getenv('JUNIT_TASK_RELATIVE_PATH', '')
self._fail_on_change = os.getenv('JUNIT_FAIL_ON_CHANGE', 'False').lower()
self._fail_on_ignore = os.getenv('JUNIT_FAIL_ON_IGNORE', 'False').lower()
self._include_setup_tasks_in_report = os.getenv('JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT', 'True').lower()
self._hide_task_arguments = os.getenv('JUNIT_HIDE_TASK_ARGUMENTS', 'False').lower()
self._test_case_prefix = os.getenv('JUNIT_TEST_CASE_PREFIX', '')
self._replace_out_of_tree_path = os.getenv('JUNIT_REPLACE_OUT_OF_TREE_PATH', None)
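# Note: the boolean-ish options above are intentionally kept as lowercase
# strings and compared against 'true'/'false' later, rather than parsed to bool.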
self._playbook_path = None
self._playbook_name = None
self._play_name = None
self._task_data = {}
self.disabled = False
if self._replace_out_of_tree_path is not None:
self._replace_out_of_tree_path = to_text(self._replace_out_of_tree_path)
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
def _start_task(self, task):
""" record the start of a task for one or more hosts """
uuid = task._uuid
if uuid in self._task_data:
return
play = self._play_name
name = task.get_name().strip()
path = task.get_path()
action = task.action
if not task.no_log and self._hide_task_arguments == 'false':
args = ', '.join(('%s=%s' % a for a in task.args.items()))
if args:
name += ' ' + args
self._task_data[uuid] = TaskData(uuid, name, path, play, action)
def _finish_task(self, status, result):
""" record the results of a task for a single host """
task_uuid = result._task._uuid
if hasattr(result, '_host'):
host_uuid = result._host._uuid
host_name = result._host.name
else:
host_uuid = 'include'
host_name = 'include'
task_data = self._task_data[task_uuid]
if self._fail_on_change == 'true' and status == 'ok' and result._result.get('changed', False):
status = 'failed'
# ignore failure if expected and toggle result if asked for
if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
status = 'ok'
elif 'TOGGLE RESULT' in task_data.name:
if status == 'failed':
status = 'ok'
elif status == 'ok':
status = 'failed'
if task_data.name.startswith(self._test_case_prefix) or status == 'failed':
task_data.add_host(HostData(host_uuid, host_name, status, result))
def _build_test_case(self, task_data, host_data):
""" build a TestCase from the given TaskData and HostData """
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
duration = host_data.finish - task_data.start
if self._task_relative_path and task_data.path:
junit_classname = to_text(os.path.relpath(to_bytes(task_data.path), to_bytes(self._task_relative_path)))
else:
junit_classname = task_data.path
if self._replace_out_of_tree_path is not None and junit_classname.startswith('../'):
junit_classname = self._replace_out_of_tree_path + to_text(os.path.basename(to_bytes(junit_classname)))
if self._task_class == 'true':
junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)
if host_data.status == 'included':
return TestCase(name=name, classname=junit_classname, time=duration, system_out=str(host_data.result))
res = host_data.result._result
rc = res.get('rc', 0)
dump = self._dump_results(res, indent=0)
dump = self._cleanse_string(dump)
if host_data.status == 'ok':
return TestCase(name=name, classname=junit_classname, time=duration, system_out=dump)
test_case = TestCase(name=name, classname=junit_classname, time=duration)
if host_data.status == 'failed':
if 'exception' in res:
message = res['exception'].strip().split('\n')[-1]
output = res['exception']
test_case.errors.append(TestError(message=message, output=output))
elif 'msg' in res:
message = res['msg']
test_case.failures.append(TestFailure(message=message, output=dump))
else:
test_case.failures.append(TestFailure(message='rc=%s' % rc, output=dump))
elif host_data.status == 'skipped':
if 'skip_reason' in res:
message = res['skip_reason']
else:
message = 'skipped'
test_case.skipped = message
return test_case
def _cleanse_string(self, value):
""" convert surrogate escapes to the unicode replacement character to avoid XML encoding errors """
return to_text(to_bytes(value, errors='surrogateescape'), errors='replace')
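# e.g. _cleanse_string('ok\udcff') -> 'ok\ufffd': the lone surrogate round-trips
# through bytes and decodes to the replacement character, which is XML-safe.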
def _generate_report(self):
""" generate a TestSuite report from the collected TaskData and HostData """
test_cases = []
for task_uuid, task_data in self._task_data.items():
if task_data.action in C._ACTION_SETUP and self._include_setup_tasks_in_report == 'false':
continue
for host_uuid, host_data in task_data.host_data.items():
test_cases.append(self._build_test_case(task_data, host_data))
test_suite = TestSuite(name=self._playbook_name, cases=test_cases)
test_suites = TestSuites(suites=[test_suite])
report = test_suites.to_pretty_xml()
output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
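# e.g. '~/.ansible.log/site-1725548000.12.xml' (playbook name plus epoch timestamp)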
with open(output_file, 'wb') as xml:
xml.write(to_bytes(report, errors='surrogate_or_strict'))
def v2_playbook_on_start(self, playbook):
self._playbook_path = playbook._file_name
self._playbook_name = os.path.splitext(os.path.basename(self._playbook_path))[0]
def v2_playbook_on_play_start(self, play):
self._play_name = play.get_name()
def v2_runner_on_no_hosts(self, task):
self._start_task(task)
def v2_playbook_on_task_start(self, task, is_conditional):
self._start_task(task)
def v2_playbook_on_cleanup_task_start(self, task):
self._start_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._start_task(task)
def v2_runner_on_failed(self, result, ignore_errors=False):
if ignore_errors and self._fail_on_ignore != 'true':
self._finish_task('ok', result)
else:
self._finish_task('failed', result)
def v2_runner_on_ok(self, result):
self._finish_task('ok', result)
def v2_runner_on_skipped(self, result):
self._finish_task('skipped', result)
def v2_playbook_on_include(self, included_file):
self._finish_task('included', included_file)
def v2_playbook_on_stats(self, stats):
self._generate_report()
class TaskData:
"""
Data about an individual task.
"""
def __init__(self, uuid, name, path, play, action):
self.uuid = uuid
self.name = name
self.path = path
self.play = play
self.host_data = {}
self.start = time.time()
self.action = action
def add_host(self, host):
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
else:
raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
self.host_data[host.uuid] = host
class HostData:
"""
Data about an individual host.
"""
def __init__(self, uuid, name, status, result):
self.uuid = uuid
self.name = name
self.status = status
self.result = result
self.finish = time.time()
| 13,773 | Python | .py | 297 | 36.508418 | 158 | 0.615138 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,446 | __init__.py | ansible_ansible/lib/ansible/plugins/callback/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import difflib
import json
import re
import sys
import textwrap
from typing import TYPE_CHECKING
from collections import OrderedDict
from collections.abc import MutableMapping
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import text_type
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.parsing.yaml.objects import AnsibleUnicode
from ansible.plugins import AnsiblePlugin
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import AnsibleUnsafeText, NativeJinjaUnsafeText
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
import yaml
if TYPE_CHECKING:
from ansible.executor.task_result import TaskResult
global_display = Display()
__all__ = ["CallbackBase"]
_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
_YAML_TEXT_TYPES = (text_type, AnsibleUnicode, AnsibleUnsafeText, NativeJinjaUnsafeText)
# Characters that libyaml/pyyaml consider breaks
_YAML_BREAK_CHARS = '\n\x85\u2028\u2029' # NL, NEL, LS, PS
# regex representation of libyaml/pyyaml of a space followed by a break character
_SPACE_BREAK_RE = re.compile(fr' +([{_YAML_BREAK_CHARS}])')
class _AnsibleCallbackDumper(AnsibleDumper):
def __init__(self, lossy=False):
self._lossy = lossy
def __call__(self, *args, **kwargs):
# pyyaml expects that we are passing an object that can be instantiated, but to
# smuggle the ``lossy`` configuration, we do that in ``__init__`` and then
# define this ``__call__`` that mimics pyyaml's ability to instantiate the class
super().__init__(*args, **kwargs)
return self
def _should_use_block(scalar):
"""Returns true if string should be in block format based on the existence of various newline separators"""
# This method of searching is faster than using a regex
for ch in _YAML_BREAK_CHARS:
if ch in scalar:
return True
return False
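# e.g. _should_use_block('single line') -> False, _should_use_block('a\nb') -> True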
class _SpecialCharacterTranslator:
def __getitem__(self, ch):
# "special character" logic from pyyaml yaml.emitter.Emitter.analyze_scalar, translated to decimal
# for perf w/ str.translate
if (ch == 10 or
32 <= ch <= 126 or
ch == 133 or
160 <= ch <= 55295 or
57344 <= ch <= 65533 or
65536 <= ch < 1114111)\
and ch != 65279:
return ch
return None
def _filter_yaml_special(scalar):
"""Filter a string removing any character that libyaml/pyyaml declare as special"""
return scalar.translate(_SpecialCharacterTranslator())
def _munge_data_for_lossy_yaml(scalar):
"""Modify a string so that analyze_scalar in libyaml/pyyaml will allow block formatting"""
# we care more about readability than accuracy, so...
# ...libyaml/pyyaml does not permit trailing spaces for block scalars
scalar = scalar.rstrip()
# ...libyaml/pyyaml does not permit tabs for block scalars
scalar = scalar.expandtabs()
# ...libyaml/pyyaml only permits special characters for double quoted scalars
scalar = _filter_yaml_special(scalar)
# ...libyaml/pyyaml only permits spaces followed by breaks for double quoted scalars
return _SPACE_BREAK_RE.sub(r'\1', scalar)
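# e.g. _munge_data_for_lossy_yaml('x \ny\t') -> 'x\ny': trailing whitespace is
# dropped and the space before the break is removed, so block style is allowed.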
def _pretty_represent_str(self, data):
"""Uses block style for multi-line strings"""
data = text_type(data)
if _should_use_block(data):
style = '|'
if self._lossy:
data = _munge_data_for_lossy_yaml(data)
else:
style = self.default_style
node = yaml.representer.ScalarNode('tag:yaml.org,2002:str', data, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
for data_type in _YAML_TEXT_TYPES:
_AnsibleCallbackDumper.add_representer(
data_type,
_pretty_represent_str
)
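# With this representer installed, multi-line strings render in block style,
# e.g. a two-line stdout becomes:
#   stdout: |-
#     line one
#     line two
# instead of a single quoted scalar with embedded '\n' escapes.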
class CallbackBase(AnsiblePlugin):
"""
This is a base ansible callback class that does nothing. New callbacks should
use this class as a base and override any callback methods for which they wish
to execute custom actions.
"""
def __init__(self, display=None, options=None):
if display:
self._display = display
else:
self._display = global_display
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'unnamed')
ctype = getattr(self, 'CALLBACK_TYPE', 'old')
version = getattr(self, 'CALLBACK_VERSION', '1.0')
self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
self.disabled = False
self.wants_implicit_tasks = False
self._plugin_options = {}
if options is not None:
self.set_options(options)
self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')
# helper for callbacks, so they don't all have to include deepcopy
_copy_result = deepcopy
def set_option(self, k, v):
self._plugin_options[k] = C.config.get_config_value(k, plugin_type=self.plugin_type, plugin_name=self._load_name, direct={k: v})
def get_option(self, k):
return self._plugin_options[k]
def set_options(self, task_keys=None, var_options=None, direct=None):
""" This is different than the normal plugin method as callbacks get called early and really don't accept keywords.
Also _options was already taken for CLI args and callbacks use _plugin_options instead.
"""
# load from config
self._plugin_options = C.config.get_plugin_options(self.plugin_type, self._load_name, keys=task_keys, variables=var_options, direct=direct)
@staticmethod
def host_label(result):
"""Return label for the hostname (& delegated hostname) of a task
result.
"""
label = "%s" % result._host.get_name()
if result._task.delegate_to and result._task.delegate_to != result._host.get_name():
# show delegated host
label += " -> %s" % result._task.delegate_to
# in case we have 'extra resolution'
ahost = result._result.get('_ansible_delegated_vars', {}).get('ansible_host', result._task.delegate_to)
if result._task.delegate_to != ahost:
label += "(%s)" % ahost
return label
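# e.g. a task on 'web01' delegated to 'db01' whose delegated vars resolve
# ansible_host to '10.0.0.5' yields the label 'web01 -> db01(10.0.0.5)'.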
def _run_is_verbose(self, result, verbosity=0):
return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
and result._result.get('_ansible_verbose_override', False) is False)
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False, serialize=True):
try:
result_format = self.get_option('result_format')
except KeyError:
# Callback does not declare result_format nor extend result_format_callback
result_format = 'json'
try:
pretty_results = self.get_option('pretty_results')
except KeyError:
# Callback does not declare pretty_results nor extend result_format_callback
pretty_results = None
indent_conditions = (
result.get('_ansible_verbose_always'),
pretty_results is None and result_format != 'json',
pretty_results is True,
self._display.verbosity > 2,
)
if not indent and any(indent_conditions):
indent = 4
if pretty_results is False:
# pretty_results=False overrides any specified indentation
indent = None
# All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(module_response_deepcopy(result))
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
if not serialize:
# Just return ``abridged_result`` without going through serialization
# to permit callbacks to take advantage of ``_dump_results``
# that want to further modify the result, or use custom serialization
return abridged_result
if result_format == 'json':
try:
return json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
except TypeError:
# Python3 bug: throws an exception when keys are non-homogeneous types:
# https://bugs.python.org/issue25457
# sort into an OrderedDict and then json.dumps() that instead
if not OrderedDict:
raise
return json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
cls=AnsibleJSONEncoder, indent=indent,
ensure_ascii=False, sort_keys=False)
elif result_format == 'yaml':
# None is a sentinel in this case that indicates default behavior
# default behavior for yaml is to prettify results
lossy = pretty_results in (None, True)
if lossy:
# if we already have stdout, we don't need stdout_lines
if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
abridged_result['stdout_lines'] = '<omitted>'
# if we already have stderr, we don't need stderr_lines
if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
abridged_result['stderr_lines'] = '<omitted>'
return '\n%s' % textwrap.indent(
yaml.dump(
abridged_result,
allow_unicode=True,
Dumper=_AnsibleCallbackDumper(lossy=lossy),
default_flow_style=False,
indent=indent,
# sort_keys=sort_keys # This requires PyYAML>=5.1
),
' ' * (indent or 4)
)
def _handle_warnings(self, res):
""" display warnings, if enabled and any exist in the result """
if C.ACTION_WARNINGS:
if 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self._display.warning(warning)
del res['warnings']
if 'deprecations' in res and res['deprecations']:
for warning in res['deprecations']:
self._display.deprecated(**warning)
del res['deprecations']
def _handle_exception(self, result, use_stderr=False):
if 'exception' in result:
msg = "An exception occurred during task execution. "
exception_str = to_text(result['exception'])
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = exception_str.strip().split('\n')[-1]
msg += "To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "The full traceback is:\n" + exception_str
del result['exception']
self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)
def _serialize_diff(self, diff):
try:
result_format = self.get_option('result_format')
except KeyError:
# Callback does not declare result_format nor extend result_format_callback
result_format = 'json'
try:
pretty_results = self.get_option('pretty_results')
except KeyError:
# Callback does not declare pretty_results nor extend result_format_callback
pretty_results = None
if result_format == 'json':
return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
elif result_format == 'yaml':
# None is a sentinel in this case that indicates default behavior
# default behavior for yaml is to prettify results
lossy = pretty_results in (None, True)
return '%s\n' % textwrap.indent(
yaml.dump(
diff,
allow_unicode=True,
Dumper=_AnsibleCallbackDumper(lossy=lossy),
default_flow_style=False,
indent=4,
# sort_keys=sort_keys # This requires PyYAML>=5.1
),
' '
)
def _get_diff(self, difflist):
if not isinstance(difflist, list):
difflist = [difflist]
ret = []
for diff in difflist:
if 'dst_binary' in diff:
ret.append(u"diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append(u"diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
# format complex structures into 'files'
for x in ['before', 'after']:
if isinstance(diff[x], MutableMapping):
diff[x] = self._serialize_diff(diff[x])
elif diff[x] is None:
diff[x] = ''
if 'before_header' in diff:
before_header = u"before: %s" % diff['before_header']
else:
before_header = u'before'
if 'after_header' in diff:
after_header = u"after: %s" % diff['after_header']
else:
after_header = u'after'
before_lines = diff['before'].splitlines(True)
after_lines = diff['after'].splitlines(True)
if before_lines and not before_lines[-1].endswith(u'\n'):
before_lines[-1] += u'\n\\ No newline at end of file\n'
if after_lines and not after_lines[-1].endswith('\n'):
after_lines[-1] += u'\n\\ No newline at end of file\n'
differ = difflib.unified_diff(before_lines,
after_lines,
fromfile=before_header,
tofile=after_header,
fromfiledate=u'',
tofiledate=u'',
n=C.DIFF_CONTEXT)
difflines = list(differ)
has_diff = False
for line in difflines:
has_diff = True
if line.startswith(u'+'):
line = stringc(line, C.COLOR_DIFF_ADD)
elif line.startswith(u'-'):
line = stringc(line, C.COLOR_DIFF_REMOVE)
elif line.startswith(u'@@'):
line = stringc(line, C.COLOR_DIFF_LINES)
ret.append(line)
if has_diff:
ret.append('\n')
if 'prepared' in diff:
ret.append(diff['prepared'])
return u''.join(ret)
def _get_item_label(self, result):
""" retrieves the value to be displayed as a label for an item entry from a result object"""
if result.get('_ansible_no_log', False):
item = "(censored due to no_log)"
else:
item = result.get('_ansible_item_label', result.get('item'))
return item
def _process_items(self, result):
# just remove them as now they get handled by individual callbacks
del result._result['results']
def _clean_results(self, result, task_name):
""" removes data from results for display """
# mostly controls that debug only outputs what it was meant to
if task_name in C._ACTION_DEBUG:
if 'msg' in result:
# msg should be alone
for key in list(result.keys()):
if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
result.pop(key)
else:
# 'var' value as field, so eliminate others and what is left should be varname
for hidme in self._hide_in_debug:
result.pop(hidme, None)
def _print_task_path(self, task, color=C.COLOR_DEBUG):
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=color)
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
def on_file_diff(self, host, diff):
pass
# V2 METHODS, by default they call v1 counterparts if possible
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result: TaskResult, ignore_errors: bool = False) -> None:
"""Get details about a failed task and whether or not Ansible should continue
running tasks on the host where the failure occurred, then process the details
as required by the callback (output, profiling, logging, notifications, etc.)
Note: The 'ignore_errors' directive only works when the task can run and returns
a value of 'failed'. It does not make Ansible ignore undefined variable errors,
connection failures, execution issues (for example, missing packages), or syntax errors.
Customization note: For more information about the attributes and methods of the
TaskResult class, see lib/ansible/executor/task_result.py.
:param TaskResult result: An object that contains details about the task
:param bool ignore_errors: Whether or not Ansible should continue running tasks on the host
where the failure occurred
:return: None
"""
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result: TaskResult) -> None:
"""Get details about a successful task and process them as required by the callback
(output, profiling, logging, notifications, etc.)
Customization note: For more information about the attributes and methods of the
TaskResult class, see lib/ansible/executor/task_result.py.
:param TaskResult result: An object that contains details about the task
:return: None
"""
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result: TaskResult) -> None:
"""Get details about a skipped task and process them as required by the callback
(output, profiling, logging, notifications, etc.)
Customization note: For more information about the attributes and methods of the
TaskResult class, see lib/ansible/executor/task_result.py.
:param TaskResult result: An object that contains details about the task
:return: None
"""
if C.DISPLAY_SKIPPED_HOSTS:
host = result._host.get_name()
self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
# FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
# Attempt to get the async job ID. If the job does not finish before the
# async timeout value, the ID may be within the unparsed 'async_result' dict.
jid = result._result.get('ansible_job_id')
if not jid and 'async_result' in result._result:
jid = result._result['async_result'].get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
def v2_playbook_on_notify(self, handler, host):
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task.name, is_conditional)
# FIXME: not called
def v2_playbook_on_cleanup_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_handler_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
# FIXME: not called
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
# FIXME: not called
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
def v2_on_file_diff(self, result):
if 'diff' in result._result:
host = result._host.get_name()
self.on_file_diff(host, result._result['diff'])
def v2_playbook_on_include(self, included_file):
pass # no v1 correspondence
def v2_runner_item_on_ok(self, result):
pass
def v2_runner_item_on_failed(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_retry(self, result):
pass
def v2_runner_on_start(self, host, task):
"""Event used when host begins execution of a task
.. versionadded:: 2.8
"""
pass
| 25,579 | Python | .py | 520 | 38.657692 | 160 | 0.62146 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,447 | host_pinned.py | ansible_ansible/lib/ansible/plugins/strategy/host_pinned.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
DOCUMENTATION = """
name: host_pinned
short_description: Executes tasks on each host without interruption
description:
- Task execution is as fast as possible per host, in batches as defined by C(serial) (default all).
Ansible will not start a play for a host unless the play can be finished without interruption by tasks for another host,
i.e. the number of hosts with an active play does not exceed the number of forks.
Ansible will not wait for other hosts to finish the current task before queuing the next task for a host that has finished.
Once a host is done with the play, it opens its slot to a new host that was waiting to start.
Other than that, it behaves just like the "free" strategy.
version_added: "2.7"
author: Ansible Core Team
"""
from ansible.plugins.strategy.free import StrategyModule as FreeStrategyModule
from ansible.utils.display import Display
display = Display()
class StrategyModule(FreeStrategyModule):
def __init__(self, tqm):
super(StrategyModule, self).__init__(tqm)
self._host_pinned = True
| 1,875 | Python | .py | 37 | 46.864865 | 133 | 0.746179 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,448 | __init__.py | ansible_ansible/lib/ansible/plugins/strategy/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import cmd
import functools
import os
import pprint
import queue
import sys
import threading
import time
import typing as t
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleUndefinedVariable, AnsibleParserError
from ansible.executor import action_write_locks
from ansible.executor.play_iterator import IteratingStates, PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.executor.task_queue_manager import CallbackSend, DisplaySend, PromptSend
from ansible.module_utils.six import string_types
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.conditional import Conditional
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.fqcn import add_internal_fqcns
from ansible.utils.unsafe_proxy import wrap_var
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
display = Display()
__all__ = ['StrategyBase']
# Entries in this list are matched as exact fact names or as name prefixes
# (start-of-string); regular expressions are not accepted.
ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
'discovered_interpreter_',
))
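# e.g. 'discovered_interpreter_python' found while delegating a task is stored
# on the delegated host, so interpreter discovery is not repeated there.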
class StrategySentinel:
pass
_sentinel = StrategySentinel()
def post_process_whens(result, task, templar, task_vars):
cond = None
if task.changed_when:
with templar.set_temporary_context(available_variables=task_vars):
cond = Conditional(loader=templar._loader)
cond.when = task.changed_when
result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
if task.failed_when:
with templar.set_temporary_context(available_variables=task_vars):
if cond is None:
cond = Conditional(loader=templar._loader)
cond.when = task.failed_when
failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
result['failed_when_result'] = result['failed'] = failed_when_result
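# Example: a task with 'changed_when: false' that reported changed=True has its
# result rewritten to changed=False here, after the module has already run.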
def _get_item_vars(result, task):
item_vars = {}
if task.loop or task.loop_with:
loop_var = result.get('ansible_loop_var', 'item')
index_var = result.get('ansible_index_var')
if loop_var in result:
item_vars[loop_var] = result[loop_var]
if index_var and index_var in result:
item_vars[index_var] = result[index_var]
if '_ansible_item_label' in result:
item_vars['_ansible_item_label'] = result['_ansible_item_label']
if 'ansible_loop' in result:
item_vars['ansible_loop'] = result['ansible_loop']
return item_vars
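# e.g. given a per-item result {'item': 'x', 'ansible_loop_var': 'item'}, this
# returns {'item': 'x'} so changed_when/failed_when can reference the loop var.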
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, DisplaySend):
dmethod = getattr(display, result.method)
dmethod(*result.args, **result.kwargs)
elif isinstance(result, CallbackSend):
for arg in result.args:
if isinstance(arg, TaskResult):
strategy.normalize_task_result(arg)
break
strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
elif isinstance(result, TaskResult):
strategy.normalize_task_result(result)
with strategy._results_lock:
strategy._results.append(result)
elif isinstance(result, PromptSend):
try:
value = display.prompt_until(
result.prompt,
private=result.private,
seconds=result.seconds,
complete_input=result.complete_input,
interrupt_input=result.interrupt_input,
)
except AnsibleError as e:
value = e
except BaseException as e:
# relay unexpected errors so bugs in display are reported and don't cause workers to hang
try:
raise AnsibleError(f"{e}") from e
except AnsibleError as e:
value = e
strategy._workers[result.worker_id].worker_queue.put(value)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (IOError, EOFError):
break
except queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator.host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
if task.run_once and iterator._play.strategy in add_internal_fqcns(('linear',)) and result.is_failed():
for host_name, state in prev_host_states.items():
if host_name == host.name:
continue
iterator.set_state_for_host(host_name, state)
iterator._play._removed_hosts.remove(host_name)
iterator.set_state_for_host(host.name, prev_host_state)
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
"""
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
"""
# by default, strategies should support throttling but we allow individual
# strategies to disable this and either forego supporting it or managing
# the throttling internally (as `free` does)
ALLOW_BASE_THROTTLING = True
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm._workers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = context.CLIARGS.get('step', False)
self._diff = context.CLIARGS.get('diff', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
self._worker_queues = dict()
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
# Caches for get_host calls, to avoid calling excessively
# These values should be set at the top of the ``run`` method of each
# strategy plugin. Use ``_set_hosts_cache`` to set these values
self._hosts_cache = []
self._hosts_cache_all = []
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def _set_hosts_cache(self, play, refresh=True):
"""Responsible for setting _hosts_cache and _hosts_cache_all
See comment in ``__init__`` for the purpose of these caches
"""
if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
return
if not play.finalized and Templar(None).is_template(play.hosts):
_pattern = 'all'
else:
_pattern = play.hosts or 'all'
self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
def cleanup(self):
# close active persistent connections
for sock in self._active_connections.values():
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be IteratingStates.COMPLETE by
# this point, though the strategy may not advance the hosts itself.
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
iterator.get_next_task_for_host(self._inventory.hosts[host])
except KeyError:
iterator.get_next_task_for_host(self._inventory.get_host(host))
# return the appropriate code, depending on the status hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(self._tqm._unreachable_hosts.keys()) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(iterator.get_failed_hosts()) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
self._set_hosts_cache(play, refresh=False)
ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
return [host for host in self._hosts_cache if host not in ignore]
def get_failed_hosts(self, play):
self._set_hosts_cache(play, refresh=False)
return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
"""
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
"""
vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
def _queue_task(self, host, task, task_vars, play_context):
""" handles queueing the task up to be sent to a worker """
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by two
# functions: linear.py::run(), and
# free.py::run() so we'd have to add to both to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# create a templar and template things we need later for the queuing process
templar = Templar(loader=self._loader, variables=task_vars)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
# and then queue the new task
try:
# Determine the "rewind point" of the worker list. This means we start
# iterating over the list of workers until the end of the list is found.
# Normally, that is simply the length of the workers list (as determined
# by the forks or serial setting), however a task/block/play may "throttle"
# that limit down.
rewind_point = len(self._workers)
if throttle > 0 and self.ALLOW_BASE_THROTTLING:
if task.run_once:
display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
else:
if throttle <= rewind_point:
display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
rewind_point = throttle
queued = False
starting_worker = self._cur_worker
while True:
if self._cur_worker >= rewind_point:
self._cur_worker = 0
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
# Pass WorkerProcess its strategy worker number so it can send an identifier along with intra-task requests
worker_prc = WorkerProcess(
self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader, self._cur_worker,
)
self._workers[self._cur_worker] = worker_prc
self._tqm.send_callback('v2_runner_on_start', host, task)
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= rewind_point:
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
else:
host_list = [task_host.name]
return host_list
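# e.g. for a run_once fact-gathering task, the returned list covers every
# reachable host so the facts get recorded for all of them, not just the runner.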
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
return [host_name or task.delegate_to]
def _set_always_delegated_facts(self, result, task):
"""Sets host facts for ``delegate_to`` hosts for facts that should
always be delegated
This operation mutates ``result`` to remove the always delegated facts
See ``ALWAYS_DELEGATE_FACT_PREFIXES``
"""
if task.delegate_to is None:
return
facts = result['ansible_facts']
always_keys = set()
_add = always_keys.add
for fact_key in facts:
for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
if fact_key.startswith(always_key):
_add(fact_key)
if always_keys:
_pop = facts.pop
always_facts = {
'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
}
host_list = self.get_delegated_hosts(result, task)
_set_host_facts = self._variable_manager.set_host_facts
for target_host in host_list:
_set_host_facts(target_host, always_facts)
def normalize_task_result(self, task_result):
"""Normalize a TaskResult to reference actual Host and Task objects
when only given the ``Host.name``, or the ``Task._uuid``
Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns
Mutates the original object
"""
if isinstance(task_result._host, string_types):
# If the value is a string, it is ``Host.name``
task_result._host = self._inventory.get_host(to_text(task_result._host))
if isinstance(task_result._task, string_types):
# If the value is a string, it is ``Task._uuid``
queue_cache_entry = (task_result._host.name, task_result._task)
try:
found_task = self._queued_task_cache[queue_cache_entry]['task']
except KeyError:
# This should only happen due to an implicit task created by the
# TaskExecutor, restrict this behavior to the explicit use case
# of an implicit async_status task
if task_result._task_fields.get('action') != 'async_status':
raise
original_task = Task()
else:
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._task = original_task
return task_result
def search_handlers_by_notification(self, notification: str, iterator: PlayIterator) -> t.Generator[Handler, None, None]:
templar = Templar(None)
handlers = [h for b in reversed(iterator._play.handlers) for h in b.block]
# iterate in reversed order since last handler loaded with the same name wins
for handler in handlers:
if not handler.name:
continue
if not handler.cached_name:
if templar.is_template(handler.name):
templar.available_variables = self._variable_manager.get_vars(
play=iterator._play,
task=handler,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all
)
try:
handler.name = templar.template(handler.name)
except (UndefinedError, AnsibleUndefinedVariable) as e:
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
if not handler.listen:
display.warning(
"Handler '%s' is unusable because it has no listen topics and "
"the name could not be templated (host-specific variables are "
"not supported in handler names). The error: %s" % (handler.name, to_text(e))
)
continue
handler.cached_name = True
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
if notification in {
handler.name,
handler.get_name(include_role_fqcn=False),
handler.get_name(include_role_fqcn=True),
}:
yield handler
break
seen = set()
for handler in handlers:
if notification in handler.listen:
if handler.name and handler.name in seen:
continue
seen.add(handler.name)
yield handler
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
"""
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
"""
ret_results = []
cur_pass = 0
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
original_host = task_result._host
original_task = task_result._task
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
# save the current state before failing it for later inspection
state_when_failed = iterator.get_state_for_host(original_host.name)
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
iterator.mark_host_failed(h)
else:
iterator.mark_host_failed(original_host)
state, dummy = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == IteratingStates.COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
# if we're iterating on the rescue portion of a block then
# we save the failed task in a special var for use
# within the rescue/always
if iterator.is_any_block_rescuing(state_when_failed):
self._tqm._stats.increment('rescued', original_host.name)
iterator._play._removed_hosts.remove(original_host.name)
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
ansible_failed_task=wrap_var(original_task.serialize()),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('failures', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
self._tqm._stats.increment('dark', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item and task_result.is_changed():
# only ensure that notified handlers exist, if so save the notifications for when
# handlers are actually flushed so the last defined handlers are executed,
# otherwise depending on the setting either error or warn
host_state = iterator.get_state_for_host(original_host.name)
for notification in result_item['_ansible_notify']:
handler = Sentinel
for handler in self.search_handlers_by_notification(notification, iterator):
if host_state.run_state == IteratingStates.HANDLERS:
# we're currently iterating handlers, so we need to expand this now
if handler.notify_host(original_host):
# NOTE even with notifications deduplicated this can still happen in case of handlers being
# notified multiple times using different names, like role name or fqcn
self._tqm.send_callback('v2_playbook_on_notify', handler, original_host)
else:
iterator.add_notification(original_host.name, notification)
display.vv(f"Notification for handler {notification} has been saved.")
break
if handler is Sentinel:
msg = (
f"The requested handler '{notification}' was not found in either the main handlers"
" list nor in the listening handlers list"
)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._inventory.add_dynamic_host(new_host_info, result_item)
# ensure host is available for subsequent plays
if result_item.get('changed') and new_host_info['host_name'] not in self._hosts_cache_all:
self._hosts_cache_all.append(new_host_info['host_name'])
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._inventory.add_dynamic_group(original_host, result_item)
if 'add_host' in result_item or 'add_group' in result_item:
item_vars = _get_item_vars(result_item, original_task)
found_task_vars = self._queued_task_cache.get((original_host.name, task_result._task._uuid))['task_vars']
if item_vars:
all_task_vars = combine_vars(found_task_vars, item_vars)
else:
all_task_vars = found_task_vars
all_task_vars[original_task.register] = wrap_var(result_item)
post_process_whens(result_item, original_task, Templar(self._loader), all_task_vars)
if original_task.loop or original_task.loop_with:
new_item_result = TaskResult(
task_result._host,
task_result._task,
result_item,
task_result._task_fields,
)
self._tqm.send_callback('v2_runner_item_on_ok', new_item_result)
if result_item.get('changed', False):
task_result._result['changed'] = True
if result_item.get('failed', False):
task_result._result['failed'] = True
if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
# Set facts that should always be on the delegated hosts
self._set_always_delegated_facts(result_item, original_task)
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action in C._ACTION_INCLUDE_VARS:
for (var_name, var_value) in result_item['ansible_facts'].items():
# find the host we're actually referring too here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
# so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
# to avoid issues with precedence and confusion with set_fact normal operation,
# we set BOTH fact and nonpersistent_facts (aka hostvar)
# when fact is retrieved from cache in subsequent operations it will have the lower precedence,
# but for playbook setting it the 'higher' precedence is kept
is_set_fact = original_task.action in C._ACTION_SET_FACT
if not is_set_fact or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if is_set_fact:
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
# register final results
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
# lookup the role in the role cache to make sure we're dealing
# with the correct object and mark it as executed
role_obj = self._get_cached_role(original_task, iterator._play)
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_pending_results(self, iterator):
"""
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
"""
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _copy_included_file(self, included_file):
"""
A proven safe and performant way to create a copy of an included file
"""
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars | included_file._vars
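        # Note (editor's addition): ``|`` is PEP 584 dict union, so keys from
        # ``included_file._vars`` override same-named keys in ``ti_copy.vars``,
        # e.g. {'a': 1, 'b': 2} | {'b': 3} == {'a': 1, 'b': 3}.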
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False, handle_stats_and_callbacks=True):
"""
Loads an included YAML file of tasks, applying the optional set of variables.
        Raises an AnsibleError exception if including the file fails; in that case
        the caller is responsible for marking the host(s) as failed
using PlayIterator.mark_host_failed().
"""
if handle_stats_and_callbacks:
display.deprecated(
"Reporting play recap stats and running callbacks functionality for "
"``include_tasks`` in ``StrategyBase._load_included_file`` is deprecated. "
"See ``https://github.com/ansible/ansible/pull/79260`` for guidance on how to "
"move the reporting into specific strategy plugins to account for "
"``include_role`` tasks as well.",
version="2.21"
)
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
if handle_stats_and_callbacks:
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleParserError:
raise
except AnsibleError as e:
if isinstance(e, AnsibleFileNotFound):
reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
else:
reason = to_text(e)
if handle_stats_and_callbacks:
for r in included_file._results:
r._result['failed'] = True
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
raise AnsibleError(reason) from e
if handle_stats_and_callbacks:
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
if not task.when:
return True
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = meta_action
skip_reason = '%s conditional evaluated to False' % meta_action
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
# These don't support "when" conditionals
if meta_action in ('noop', 'refresh_inventory', 'reset_connection') and task.when:
self._cond_not_supported_warn(meta_action)
if meta_action == 'noop':
msg = "noop"
elif meta_action == 'flush_handlers':
if _evaluate_conditional(target_host):
host_state = iterator.get_state_for_host(target_host.name)
# actually notify proper handlers based on all notifications up to this point
for notification in list(host_state.handler_notifications):
for handler in self.search_handlers_by_notification(notification, iterator):
if handler.notify_host(target_host):
# NOTE even with notifications deduplicated this can still happen in case of handlers being
# notified multiple times using different names, like role name or fqcn
self._tqm.send_callback('v2_playbook_on_notify', handler, target_host)
iterator.clear_notification(target_host.name, notification)
if host_state.run_state == IteratingStates.HANDLERS:
raise AnsibleError('flush_handlers cannot be used as a handler')
if target_host.name not in self._tqm._unreachable_hosts:
host_state.pre_flushing_run_state = host_state.run_state
host_state.run_state = IteratingStates.HANDLERS
msg = "triggered running handlers for %s" % target_host.name
else:
skipped = True
skip_reason += ', not running handlers for %s' % target_host.name
elif meta_action == 'refresh_inventory':
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator.clear_host_errors(host)
msg = "cleared host errors"
else:
skipped = True
skip_reason += ', not clearing host error state for %s' % target_host.name
elif meta_action == 'end_batch':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.end_host(host.name)
msg = "ending batch"
else:
skipped = True
skip_reason += ', continuing current batch'
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.end_host(host.name)
# end_play is used in PlaybookExecutor/TQM to indicate that
# the whole play is supposed to be ended as opposed to just a batch
iterator.end_play = True
msg = "ending play"
else:
skipped = True
skip_reason += ', continuing play'
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator.end_host(target_host.name)
msg = "ending play for %s" % target_host.name
else:
skipped = True
skip_reason += ", continuing execution for %s" % target_host.name
# TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
elif meta_action == 'role_complete':
if task.implicit:
role_obj = self._get_cached_role(task, iterator._play)
if target_host.name in role_obj._had_task_run:
role_obj._completed[target_host.name] = True
msg = 'role_complete for %s' % target_host.name
elif meta_action == 'end_role':
if _evaluate_conditional(target_host):
while True:
state, task = iterator.get_next_task_for_host(target_host, peek=True)
if task.action in C._ACTION_META and task.args.get("_raw_params") == "role_complete":
break
iterator.set_state_for_host(target_host.name, state)
display.debug("'%s' skipped because role has been ended via 'end_role'" % task)
msg = 'ending role %s for %s' % (task._role.get_name(), target_host.name)
else:
skipped = True
                skip_reason += ', continuing role %s for %s' % (task._role.get_name(), target_host.name)
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist. This 'mostly' works here cause meta
# disregards the loop, but should not really use play_context at all
play_context.update_vars(all_vars)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
play_context.set_attributes_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
result['skip_reason'] = skip_reason
else:
result['changed'] = False
header = skip_reason if skipped else msg
if task.implicit:
display.debug(f"META: {header}")
else:
display.vv(f"META: {header}")
res = TaskResult(target_host, task, result)
if skipped:
self._tqm.send_callback('v2_runner_on_skipped', res)
return [res]
def _get_cached_role(self, task, play):
return play._get_cached_role(task._role)
def get_hosts_left(self, iterator):
""" returns list of available hosts for this iterator by filtering out unreachables """
hosts_left = []
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
hosts_left.append(self._inventory.hosts[host])
except KeyError:
hosts_left.append(self._inventory.get_host(host))
return hosts_left
def update_active_connections(self, results):
""" updates the current active persistent connections """
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = Templar(None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
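
# Quick reference (editor's addition, not part of the original file): at the
# debug prompt the commands defined above are p <expr> (pretty-print an
# expression), q (quit), c (continue), r (redo the task), u (re-template the
# task from task._ds); any other input is compiled and exec'd in self.scope,
# so assignments against ``task`` or ``task_vars`` take effect before a redo.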
size: 57,303 | language: Python | extension: .py | total_lines: 1,061 | avg_line_length: 39.187559 | max_line_length: 146 | alphanum_fraction: 0.574611 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,449 | file_name: linear.py | file_path: ansible_ansible/lib/ansible/plugins/strategy/linear.py
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
DOCUMENTATION = """
name: linear
short_description: Executes tasks in a linear fashion
description:
- Task execution is in lockstep per host batch as defined by C(serial) (default all).
Up to the fork limit of hosts will execute each task at the same time and then
the next series of hosts until the batch is done, before going on to the next task.
version_added: "2.0"
notes:
- This was the default Ansible behaviour before 'strategy plugins' were introduced in 2.0.
author: Ansible Core Team
"""
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_text
from ansible.playbook.handler import Handler
from ansible.playbook.included_file import IncludedFile
from ansible.plugins.loader import action_loader
from ansible.plugins.strategy import StrategyBase
from ansible.template import Templar
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
"""
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
"""
state_task_per_host = {}
for host in hosts:
state, task = iterator.get_next_task_for_host(host, peek=True)
if task is not None:
state_task_per_host[host] = state, task
if not state_task_per_host:
return []
task_uuids = {t._uuid for s, t in state_task_per_host.values()}
_loop_cnt = 0
while _loop_cnt <= 1:
try:
cur_task = iterator.all_tasks[iterator.cur_task]
except IndexError:
# pick up any tasks left after clear_host_errors
iterator.cur_task = 0
_loop_cnt += 1
else:
iterator.cur_task += 1
if cur_task._uuid in task_uuids:
break
else:
# prevent infinite loop
raise AnsibleAssertionError(
'BUG: There seems to be a mismatch between tasks in PlayIterator and HostStates.'
)
host_tasks = []
for host, (state, task) in state_task_per_host.items():
if cur_task._uuid == task._uuid:
iterator.set_state_for_host(host.name, state)
host_tasks.append((host, task))
if cur_task.action in C._ACTION_META and cur_task.args.get('_raw_params') == 'flush_handlers':
iterator.all_tasks[iterator.cur_task:iterator.cur_task] = [h for b in iterator._play.handlers for h in b.block]
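            # Note (editor's addition): ``lst[i:i] = items`` is the standard
            # in-place list splice (e.g. lst = [1, 4]; lst[1:1] = [2, 3] gives
            # [1, 2, 3, 4]), so the play's handler tasks are inserted at the
            # iterator's current cursor position.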
return host_tasks
def run(self, iterator, play_context):
"""
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
"""
# iterate over each task, while there is one left to run
result = self._tqm.RUN_OK
work_to_do = True
self._set_hosts_cache(iterator._play)
while work_to_do and not self._tqm._terminated:
try:
display.debug("getting the remaining hosts for this loop")
hosts_left = self.get_hosts_left(iterator)
display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
# flag set if task is set to any_errors_fatal
any_errors_fatal = False
results = []
for (host, task) in host_tasks:
if self._tqm._terminated:
break
run_once = False
work_to_do = True
display.debug("getting variables")
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
display.debug("done getting variables")
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
task_action = templar.template(task.action)
try:
action = action_loader.get(task_action, class_only=True, collection_list=task.collections)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None
if task_action in C._ACTION_META:
# for the linear strategy, we run meta tasks just once and for
# all hosts currently being iterated over rather than one host
results.extend(self._execute_meta(task, play_context, iterator, host))
if task.args.get('_raw_params', None) not in ('noop', 'reset_connection', 'end_host', 'role_complete', 'flush_handlers'):
run_once = True
if (task.any_errors_fatal or run_once) and not task.ignore_errors:
any_errors_fatal = True
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
run_once = action and getattr(action, 'BYPASS_HOST_LOOP', False) or templar.template(task.run_once)
try:
task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
except Exception as e:
display.debug(f"Failed to templalte task name ({task.name}), ignoring error and continuing: {e}")
if (task.any_errors_fatal or run_once) and not task.ignore_errors:
any_errors_fatal = True
if not callback_sent:
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
callback_sent = True
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
del task_vars
if isinstance(task, Handler):
if run_once:
task.clear_hosts()
else:
task.remove_host(host)
# if we're bypassing the host loop, break out now
if run_once:
break
results.extend(self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1))))
# go to next host/task group
if skip_rest:
continue
display.debug("done queuing things up, now waiting for results queue to drain")
if self._pending_results > 0:
results.extend(self._wait_on_pending_results(iterator))
self.update_active_connections(results)
included_files = IncludedFile.process_include_results(
results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
if len(included_files) > 0:
display.debug("we have included files to process")
display.debug("generating all_blocks data")
all_blocks = dict((host, []) for host in hosts_left)
display.debug("done generating all_blocks data")
included_tasks = []
failed_includes_hosts = set()
for included_file in included_files:
display.debug("processing included file: %s" % included_file._filename)
is_handler = False
try:
if included_file._is_role:
new_ir = self._copy_included_file(included_file)
new_blocks, handler_blocks = new_ir.get_block_list(
play=iterator._play,
variable_manager=self._variable_manager,
loader=self._loader,
)
else:
is_handler = isinstance(included_file._task, Handler)
new_blocks = self._load_included_file(
included_file,
iterator=iterator,
is_handler=is_handler,
handle_stats_and_callbacks=False,
)
# let PlayIterator know about any new handlers included via include_role or
                            # import_role within include_role/include_tasks
iterator.handlers = [h for b in iterator._play.handlers for h in b.block]
display.debug("iterating over new_blocks loaded from include file")
for new_block in new_blocks:
if is_handler:
for task in new_block.block:
task.notified_hosts = included_file._hosts[:]
final_block = new_block
else:
task_vars = self._variable_manager.get_vars(
play=iterator._play,
task=new_block.get_first_parent_include(),
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all,
)
display.debug("filtering new block on tags")
final_block = new_block.filter_tagged_tasks(task_vars)
display.debug("done filtering new block on tags")
included_tasks.extend(final_block.get_tasks())
for host in hosts_left:
if host in included_file._hosts:
all_blocks[host].append(final_block)
display.debug("done iterating over new_blocks loaded from include file")
except AnsibleParserError:
raise
except AnsibleError as e:
display.error(to_text(e), wrap_text=False)
for r in included_file._results:
r._result['failed'] = True
r._result['reason'] = str(e)
self._tqm._stats.increment('failures', r._host.name)
self._tqm.send_callback('v2_runner_on_failed', r)
failed_includes_hosts.add(r._host)
else:
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
self._tqm.send_callback('v2_playbook_on_include', included_file)
for host in failed_includes_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
# finally go through all of the hosts and append the
# accumulated blocks to their list of tasks
display.debug("extending task lists for all hosts with included blocks")
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
iterator.all_tasks[iterator.cur_task:iterator.cur_task] = included_tasks
display.debug("done extending task lists")
display.debug("done processing included files")
display.debug("results queue empty")
display.debug("checking for any_errors_fatal")
failed_hosts = []
unreachable_hosts = []
for res in results:
if res.is_failed():
failed_hosts.append(res._host.name)
elif res.is_unreachable():
unreachable_hosts.append(res._host.name)
if any_errors_fatal and (failed_hosts or unreachable_hosts):
for host in hosts_left:
if host.name not in failed_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.debug("done checking for any_errors_fatal")
display.debug("checking for max_fail_percentage")
if iterator._play.max_fail_percentage is not None and len(results) > 0:
percentage = iterator._play.max_fail_percentage / 100.0
if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
for host in hosts_left:
# don't double-mark hosts, or the iterator will potentially
# fail them out of the rescue/always states
if host.name not in failed_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result |= self._tqm.RUN_FAILED_BREAK_PLAY
display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
display.debug("done checking for max_fail_percentage")
display.debug("checking to see if all hosts have failed and the running result is not ok")
if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
display.debug("^ not ok, so returning result now")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
return result
display.debug("done checking to see if all hosts have failed")
except (IOError, EOFError) as e:
display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return self._tqm.RUN_UNKNOWN_ERROR
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
size: 17,418 | language: Python | extension: .py | total_lines: 298 | avg_line_length: 38.35906 | max_line_length: 145 | alphanum_fraction: 0.522919 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,450 | file_name: debug.py | file_path: ansible_ansible/lib/ansible/plugins/strategy/debug.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
DOCUMENTATION = """
name: debug
short_description: Executes tasks in interactive debug session.
description:
- Task execution is 'linear' but controlled by an interactive debug session.
version_added: "2.1"
author: Kishin Yagami (!UNKNOWN)
"""
from ansible.plugins.strategy.linear import StrategyModule as LinearStrategyModule
class StrategyModule(LinearStrategyModule):
def __init__(self, tqm):
super(StrategyModule, self).__init__(tqm)
self.debugger_active = True
size: 1,205 | language: Python | extension: .py | total_lines: 28 | avg_line_length: 40.178571 | max_line_length: 84 | alphanum_fraction: 0.758738 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,451 | file_name: free.py | file_path: ansible_ansible/lib/ansible/plugins/strategy/free.py
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
DOCUMENTATION = """
name: free
short_description: Executes tasks without waiting for all hosts
description:
- Task execution is as fast as possible per batch as defined by C(serial) (default all).
Ansible will not wait for other hosts to finish the current task before queuing more tasks for other hosts.
All hosts are still attempted for the current task, but it prevents blocking new tasks for hosts that have already finished.
- With the free strategy, unlike the default linear strategy, a host that is slow or stuck on a specific task
won't hold up the rest of the hosts and tasks.
version_added: "2.0"
author: Ansible Core Team
"""
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.handler import Handler
from ansible.playbook.included_file import IncludedFile
from ansible.plugins.loader import action_loader
from ansible.plugins.strategy import StrategyBase
from ansible.template import Templar
from ansible.module_utils.common.text.converters import to_text
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
# This strategy manages throttling on its own, so we don't want it done in queue_task
ALLOW_BASE_THROTTLING = False
def __init__(self, tqm):
super(StrategyModule, self).__init__(tqm)
self._host_pinned = False
def run(self, iterator, play_context):
"""
The "free" strategy is a bit more complex, in that it allows tasks to
be sent to hosts as quickly as they can be processed. This means that
        some hosts may finish very quickly if their tasks result in little or no
        work, compared to other systems.
The algorithm used here also tries to be more "fair" when iterating
through hosts by remembering the last host in the list to be given a task
and starting the search from there as opposed to the top of the hosts
list again, which would end up favoring hosts near the beginning of the
list.
"""
# the last host to be given a task
last_host = 0
result = self._tqm.RUN_OK
# start with all workers being counted as being free
workers_free = len(self._workers)
self._set_hosts_cache(iterator._play)
if iterator._play.max_fail_percentage is not None:
display.warning("Using max_fail_percentage with the free strategy is not supported, as tasks are executed independently on each host")
work_to_do = True
while work_to_do and not self._tqm._terminated:
hosts_left = self.get_hosts_left(iterator)
if len(hosts_left) == 0:
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
work_to_do = False # assume we have no more work to do
starting_host = last_host # save current position so we know when we've looped back around and need to break
# try and find an unblocked host with a task to run
host_results = []
meta_task_dummy_results_count = 0
while True:
host = hosts_left[last_host]
display.debug("next free host: %s" % host)
host_name = host.get_name()
# peek at the next task for the host, to see if there's
                # anything to do for this host
(state, task) = iterator.get_next_task_for_host(host, peek=True)
display.debug("free host state: %s" % state, host=host_name)
display.debug("free host task: %s" % task, host=host_name)
# check if there is work to do, either there is a task or the host is still blocked which could
# mean that it is processing an include task and after its result is processed there might be
# more tasks to run
if (task or self._blocked_hosts.get(host_name, False)) and not self._tqm._unreachable_hosts.get(host_name, False):
display.debug("this host has work to do", host=host_name)
# set the flag so the outer loop knows we've still found
# some work which needs to be done
work_to_do = True
if not self._tqm._unreachable_hosts.get(host_name, False) and task:
# check to see if this host is blocked (still executing a previous task)
if not self._blocked_hosts.get(host_name, False):
display.debug("getting variables", host=host_name)
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
display.debug("done getting variables", host=host_name)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
if throttle > 0:
same_tasks = 0
for worker in self._workers:
if worker and worker.is_alive() and worker._task._uuid == task._uuid:
same_tasks += 1
display.debug("task: %s, same_tasks: %d" % (task.get_name(), same_tasks))
if same_tasks >= throttle:
break
# advance the host, mark the host blocked, and queue it
self._blocked_hosts[host_name] = True
iterator.set_state_for_host(host.name, state)
if isinstance(task, Handler):
task.remove_host(host)
try:
action = action_loader.get(task.action, class_only=True, collection_list=task.collections)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None
try:
task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
display.debug("done templating", host=host_name)
except Exception:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason", host=host_name)
run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
if run_once:
if action and getattr(action, 'BYPASS_HOST_LOOP', False):
raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy "
"and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
else:
display.warning("Using run_once with the free strategy is not currently supported. This task will still be "
"executed for every host in the inventory list.")
if task.action in C._ACTION_META:
if self._host_pinned:
meta_task_dummy_results_count += 1
workers_free -= 1
self._execute_meta(task, play_context, iterator, target_host=host)
self._blocked_hosts[host_name] = False
else:
# handle step if needed, skip meta actions as they are used internally
if not self._step or self._take_step(task, host_name):
if task.any_errors_fatal:
display.warning("Using any_errors_fatal with the free strategy is not supported, "
"as tasks are executed independently on each host")
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
self._queue_task(host, task, task_vars, play_context)
# each task is counted as a worker being busy
workers_free -= 1
del task_vars
else:
display.debug("%s is blocked, skipping for now" % host_name)
# all workers have tasks to do (and the current host isn't done with the play).
# loop back to starting host and break out
if self._host_pinned and workers_free == 0 and work_to_do:
last_host = starting_host
break
# move on to the next host and make sure we
# haven't gone past the end of our hosts list
last_host += 1
if last_host > len(hosts_left) - 1:
last_host = 0
# if we've looped around back to the start, break out
if last_host == starting_host:
break
results = self._process_pending_results(iterator)
host_results.extend(results)
# each result is counted as a worker being free again
workers_free += len(results) + meta_task_dummy_results_count
self.update_active_connections(results)
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
if len(included_files) > 0:
all_blocks = dict((host, []) for host in hosts_left)
failed_includes_hosts = set()
for included_file in included_files:
display.debug("collecting new blocks for %s" % included_file)
is_handler = False
try:
if included_file._is_role:
new_ir = self._copy_included_file(included_file)
new_blocks, handler_blocks = new_ir.get_block_list(
play=iterator._play,
variable_manager=self._variable_manager,
loader=self._loader,
)
else:
is_handler = isinstance(included_file._task, Handler)
new_blocks = self._load_included_file(
included_file,
iterator=iterator,
is_handler=is_handler,
handle_stats_and_callbacks=False,
)
# let PlayIterator know about any new handlers included via include_role or
                        # import_role within include_role/include_tasks
iterator.handlers = [h for b in iterator._play.handlers for h in b.block]
except AnsibleParserError:
raise
except AnsibleError as e:
display.error(to_text(e), wrap_text=False)
for r in included_file._results:
r._result['failed'] = True
r._result['reason'] = str(e)
self._tqm._stats.increment('failures', r._host.name)
self._tqm.send_callback('v2_runner_on_failed', r)
failed_includes_hosts.add(r._host)
continue
else:
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
self._tqm.send_callback('v2_playbook_on_include', included_file)
for new_block in new_blocks:
if is_handler:
for task in new_block.block:
task.notified_hosts = included_file._hosts[:]
final_block = new_block
else:
task_vars = self._variable_manager.get_vars(
play=iterator._play,
task=new_block.get_first_parent_include(),
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all,
)
final_block = new_block.filter_tagged_tasks(task_vars)
for host in hosts_left:
if host in included_file._hosts:
all_blocks[host].append(final_block)
display.debug("done collecting new blocks for %s" % included_file)
for host in failed_includes_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
display.debug("done adding collected blocks to iterator")
# pause briefly so we don't spin lock
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
# collect all the final results
results = self._wait_on_pending_results(iterator)
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
size: 16,038 | language: Python | extension: .py | total_lines: 262 | avg_line_length: 41.557252 | max_line_length: 147 | alphanum_fraction: 0.54053 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,452 | file_name: urlsplit.py | file_path: ansible_ansible/lib/ansible/plugins/filter/urlsplit.py
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
name: urlsplit
version_added: "2.4"
short_description: get components from URL
description:
- Split a URL into its component parts.
positional: _input, query
options:
_input:
description: URL string to split.
type: str
required: true
query:
description: Specify a single component to return.
type: str
choices: ["fragment", "hostname", "netloc", "password", "path", "port", "query", "scheme", "username"]
"""
EXAMPLES = r"""
parts: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit }}'
# =>
# {
# "fragment": "fragment",
# "hostname": "www.acme.com",
# "netloc": "user:password@www.acme.com:9000",
# "password": "password",
# "path": "/dir/index.html",
# "port": 9000,
# "query": "query=term",
# "scheme": "http",
# "username": "user"
# }
hostname: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit("hostname") }}'
# => 'www.acme.com'
query: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit("query") }}'
# => 'query=term'
path: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit("path") }}'
# => '/dir/index.html'
"""
RETURN = r"""
_value:
description:
- A dictionary with components as keyword and their value.
- If O(query) is provided, a string or integer will be returned instead, depending on O(query).
type: any
"""
from urllib.parse import urlsplit
from ansible.errors import AnsibleFilterError
from ansible.utils import helpers
def split_url(value, query='', alias='urlsplit'):
results = helpers.object_to_dict(urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
# If a query is supplied, make sure it's valid then return the results.
# If no option is supplied, return the entire dictionary.
if query:
if query not in results:
raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
return results[query]
else:
return results
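
# Illustrative doctest-style sketch (editor's addition): what split_url returns,
# derived from the stdlib urlsplit it wraps above.
#
#   >>> split_url('http://user:pw@www.acme.com:9000/dir/index.html?q=t#frag', 'hostname')
#   'www.acme.com'
#   >>> split_url('http://user:pw@www.acme.com:9000/dir/index.html?q=t#frag', 'port')
#   9000
#   >>> split_url('http://example.com/a', 'bogus')
#   Traceback (most recent call last):
#   ...
#   ansible.errors.AnsibleFilterError: urlsplit: unknown URL component: bogus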
# ---- Ansible filters ----
class FilterModule(object):
""" URI filter """
def filters(self):
return {
'urlsplit': split_url
}
size: 2,571 | language: Python | extension: .py | total_lines: 68 | avg_line_length: 32.867647 | max_line_length: 120 | alphanum_fraction: 0.631791 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,453 | file_name: urls.py | file_path: ansible_ansible/lib/ansible/plugins/filter/urls.py
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from functools import partial
from urllib.parse import unquote_plus
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'urldecode': partial(unquote_plus),
}
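
# Usage sketch (editor's addition): in a template,
#   {{ 'a%20b+c' | urldecode }}  ->  'a b c'
# since unquote_plus decodes both percent-escapes and '+' as a space.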
size: 457 | language: Python | extension: .py | total_lines: 12 | avg_line_length: 33.5 | max_line_length: 92 | alphanum_fraction: 0.687215 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,454 | file_name: encryption.py | file_path: ansible_ansible/lib/ansible/plugins/filter/encryption.py
# Copyright: (c) 2021, Ansible Project
from __future__ import annotations
from jinja2.runtime import Undefined
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.common.text.converters import to_native, to_bytes
from ansible.module_utils.six import string_types, binary_type
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.parsing.vault import is_encrypted, VaultSecret, VaultLib
from ansible.utils.display import Display
display = Display()
def do_vault(data, secret, salt=None, vault_id='filter_default', wrap_object=False, vaultid=None):
if not isinstance(secret, (string_types, binary_type, Undefined)):
raise AnsibleFilterTypeError("Secret passed is required to be a string, instead we got: %s" % type(secret))
if not isinstance(data, (string_types, binary_type, Undefined)):
raise AnsibleFilterTypeError("Can only vault strings, instead we got: %s" % type(data))
if vaultid is not None:
display.deprecated("Use of undocumented 'vaultid', use 'vault_id' instead", version='2.20')
if vault_id == 'filter_default':
vault_id = vaultid
else:
display.warning("Ignoring vaultid as vault_id is already set.")
vault = ''
vs = VaultSecret(to_bytes(secret))
vl = VaultLib()
try:
vault = vl.encrypt(to_bytes(data), vs, vault_id, salt)
except UndefinedError:
raise
except Exception as e:
raise AnsibleFilterError("Unable to encrypt: %s" % to_native(e), orig_exc=e)
if wrap_object:
vault = AnsibleVaultEncryptedUnicode(vault)
else:
vault = to_native(vault)
return vault
def do_unvault(vault, secret, vault_id='filter_default', vaultid=None):
if not isinstance(secret, (string_types, binary_type, Undefined)):
raise AnsibleFilterTypeError("Secret passed is required to be as string, instead we got: %s" % type(secret))
if not isinstance(vault, (string_types, binary_type, AnsibleVaultEncryptedUnicode, Undefined)):
raise AnsibleFilterTypeError("Vault should be in the form of a string, instead we got: %s" % type(vault))
if vaultid is not None:
display.deprecated("Use of undocumented 'vaultid', use 'vault_id' instead", version='2.20')
if vault_id == 'filter_default':
vault_id = vaultid
else:
display.warning("Ignoring vaultid as vault_id is already set.")
data = ''
vs = VaultSecret(to_bytes(secret))
vl = VaultLib([(vault_id, vs)])
if isinstance(vault, AnsibleVaultEncryptedUnicode):
vault.vault = vl
data = vault.data
elif is_encrypted(vault):
try:
data = vl.decrypt(vault)
except UndefinedError:
raise
except Exception as e:
raise AnsibleFilterError("Unable to decrypt: %s" % to_native(e), orig_exc=e)
else:
data = vault
return to_native(data)
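
# Usage sketch (editor's addition): a typical template-side round trip, assuming
# a hypothetical variable ``vault_pw`` that holds the vault password:
#   {{ 'hello' | vault(vault_pw) }}         ->  '$ANSIBLE_VAULT;...' payload
#   {{ that_payload | unvault(vault_pw) }}  ->  'hello'
# Pass wrap_object=True to vault to get an AnsibleVaultEncryptedUnicode back
# instead of a plain string.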
class FilterModule(object):
""" Ansible vault jinja2 filters """
def filters(self):
filters = {
'vault': do_vault,
'unvault': do_unvault,
}
return filters
size: 3,240 | language: Python | extension: .py | total_lines: 71 | avg_line_length: 38.732394 | max_line_length: 116 | alphanum_fraction: 0.687858 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,455 | file_name: mathstuff.py | file_path: ansible_ansible/lib/ansible/plugins/filter/mathstuff.py
# Copyright 2014, Brian Coca <bcoca@ansible.com>
# Copyright 2017, Ken Celenza <ken@networktocode.com>
# Copyright 2017, Jason Edelman <jason@networktocode.com>
# Copyright 2017, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import itertools
import math
from collections.abc import Mapping, Iterable
from jinja2.filters import pass_environment
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.common.text import formatters
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.utils.display import Display
try:
from jinja2.filters import do_unique
HAS_UNIQUE = True
except ImportError:
HAS_UNIQUE = False
display = Display()
@pass_environment
# Use case_sensitive=None as a sentinel value, so we raise an error only when it is
# explicitly set and cannot be handled (by Jinja2 without 'unique', or by the fallback version)
def unique(environment, a, case_sensitive=None, attribute=None):
def _do_fail(e):
if case_sensitive is False or attribute:
raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
"as it does not support the parameters supplied", orig_exc=e)
error = e = None
try:
if HAS_UNIQUE:
c = list(do_unique(environment, a, case_sensitive=bool(case_sensitive), attribute=attribute))
except TypeError as e:
error = e
_do_fail(e)
except Exception as e:
error = e
_do_fail(e)
display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
if not HAS_UNIQUE or error:
# handle Jinja2 specific attributes when using Ansible's version
if case_sensitive is False or attribute:
raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive=False nor attribute parameters, "
"you need a newer version of Jinja2 that provides their version of the filter.")
c = []
for x in a:
if x not in c:
c.append(x)
return c
@pass_environment
def intersect(environment, a, b):
try:
c = list(set(a) & set(b))
except TypeError:
c = unique(environment, [x for x in a if x in b], True)
return c
@pass_environment
def difference(environment, a, b):
try:
c = list(set(a) - set(b))
except TypeError:
c = unique(environment, [x for x in a if x not in b], True)
return c
@pass_environment
def symmetric_difference(environment, a, b):
try:
c = list(set(a) ^ set(b))
except TypeError:
isect = intersect(environment, a, b)
c = [x for x in union(environment, a, b) if x not in isect]
return c
@pass_environment
def union(environment, a, b):
try:
c = list(set(a) | set(b))
except TypeError:
c = unique(environment, a + b, True)
return c
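
# Behavior note (editor's addition): when elements are unhashable (e.g. dicts),
# the set() fast paths above raise TypeError and each filter falls back to the
# order-preserving unique() scan, so for example:
#   intersect(env, [{'a': 1}, {'b': 2}], [{'b': 2}])  ->  [{'b': 2}]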
def logarithm(x, base=math.e):
try:
if base == 10:
return math.log10(x)
else:
return math.log(x, base)
except TypeError as e:
raise AnsibleFilterTypeError('log() can only be used on numbers: %s' % to_native(e))
def power(x, y):
try:
return math.pow(x, y)
except TypeError as e:
raise AnsibleFilterTypeError('pow() can only be used on numbers: %s' % to_native(e))
def inversepower(x, base=2):
try:
if base == 2:
return math.sqrt(x)
else:
return math.pow(x, 1.0 / float(base))
except (ValueError, TypeError) as e:
raise AnsibleFilterTypeError('root() can only be used on numbers: %s' % to_native(e))
def human_readable(size, isbits=False, unit=None):
""" Return a human-readable string """
try:
return formatters.bytes_to_human(size, isbits, unit)
except TypeError as e:
raise AnsibleFilterTypeError("human_readable() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
def human_to_bytes(size, default_unit=None, isbits=False):
""" Return bytes count from a human-readable string """
try:
return formatters.human_to_bytes(size, default_unit, isbits)
except TypeError as e:
raise AnsibleFilterTypeError("human_to_bytes() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
def rekey_on_member(data, key, duplicates='error'):
"""
Rekey a dict of dicts on another member
May also create a dict from a list of dicts.
duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
value would be duplicated or to overwrite previous entries if that's the case.
"""
if duplicates not in ('error', 'overwrite'):
raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))
new_obj = {}
# Ensure the positional args are defined - raise jinja2.exceptions.UndefinedError if not
bool(data) and bool(key)
if isinstance(data, Mapping):
iterate_over = data.values()
elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
iterate_over = data
else:
raise AnsibleFilterTypeError("Type is not a valid list, set, or dict")
for item in iterate_over:
if not isinstance(item, Mapping):
raise AnsibleFilterTypeError("List item is not a valid dict")
try:
key_elem = item[key]
except KeyError:
raise AnsibleFilterError("Key {0} was not found".format(key))
except TypeError as e:
raise AnsibleFilterTypeError(to_native(e))
except Exception as e:
raise AnsibleFilterError(to_native(e))
        # Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at
        # minimum contain {key: key_elem})
if new_obj.get(key_elem, None):
if duplicates == 'error':
raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
elif duplicates == 'overwrite':
new_obj[key_elem] = item
else:
new_obj[key_elem] = item
return new_obj
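# Illustrative example (added, not upstream): rekeying a list of dicts on the
# 'name' member.
#
#   users = [{'name': 'alice', 'uid': 1}, {'name': 'bob', 'uid': 2}]
#   rekey_on_member(users, 'name')
#   -> {'alice': {'name': 'alice', 'uid': 1},
#       'bob': {'name': 'bob', 'uid': 2}}
#
# With duplicates='error' (the default) a repeated key raises
# AnsibleFilterError; duplicates='overwrite' keeps the last item seen.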
class FilterModule(object):
""" Ansible math jinja2 filters """
def filters(self):
filters = {
# exponents and logarithms
'log': logarithm,
'pow': power,
'root': inversepower,
# set theory
'unique': unique,
'intersect': intersect,
'difference': difference,
'symmetric_difference': symmetric_difference,
'union': union,
# combinatorial
'product': itertools.product,
'permutations': itertools.permutations,
'combinations': itertools.combinations,
# computer theory
'human_readable': human_readable,
'human_to_bytes': human_to_bytes,
'rekey_on_member': rekey_on_member,
# zip
'zip': zip,
'zip_longest': itertools.zip_longest,
}
return filters
| 8,101 | Python | .py | 196 | 34.045918 | 127 | 0.659279 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,456 | __init__.py | ansible_ansible/lib/ansible/plugins/filter/__init__.py |
# (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible import constants as C
from ansible.plugins import AnsibleJinja2Plugin
class AnsibleJinja2Filter(AnsibleJinja2Plugin):
def _no_options(self, *args, **kwargs):
raise NotImplementedError("Jinja2 filter plugins do not support option functions, they use direct arguments instead.")
| 457 | Python | .py | 8 | 54 | 126 | 0.788288 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,457 | core.py | ansible_ansible/lib/ansible/plugins/filter/core.py |
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import base64
import glob
import hashlib
import json
import ntpath
import os.path
import re
import shlex
import sys
import time
import uuid
import yaml
import datetime
from collections.abc import Mapping
from functools import partial
from random import Random, SystemRandom, shuffle
from jinja2.filters import pass_environment
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.six import string_types, integer_types, reraise, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.yaml import yaml_load, yaml_load_all
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.template import recursive_check_defined
from ansible.utils.display import Display
from ansible.utils.encrypt import do_encrypt, PASSLIB_AVAILABLE
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
display = Display()
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
def to_yaml(a, *args, **kw):
"""Make verbose, human-readable yaml"""
default_flow_style = kw.pop('default_flow_style', None)
try:
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, default_flow_style=default_flow_style, **kw)
except Exception as e:
raise AnsibleFilterError("to_yaml - %s" % to_native(e), orig_exc=e)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
"""Make verbose, human-readable yaml"""
try:
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
except Exception as e:
raise AnsibleFilterError("to_nice_yaml - %s" % to_native(e), orig_exc=e)
return to_text(transformed)
def to_json(a, *args, **kw):
""" Convert the value to JSON """
# defaults for filters
if 'vault_to_text' not in kw:
kw['vault_to_text'] = True
if 'preprocess_unsafe' not in kw:
kw['preprocess_unsafe'] = False
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, sort_keys=True, *args, **kw):
"""Make verbose, human-readable JSON"""
# TODO separators can be potentially exposed to the user as well
kw.pop('separators', None)
return to_json(a, indent=indent, sort_keys=sort_keys, separators=(',', ': '), *args, **kw)
def to_bool(a):
""" return a bool for the arg """
if a is None or isinstance(a, bool):
return a
if isinstance(a, string_types):
a = a.lower()
if a in ('yes', 'on', '1', 'true', 1):
return True
return False
def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.strptime(string, format)
def strftime(string_format, second=None, utc=False):
""" return a date string using string. See https://docs.python.org/3/library/time.html#time.strftime for format """
if utc:
timefn = time.gmtime
else:
timefn = time.localtime
if second is not None:
try:
second = float(second)
except Exception:
raise AnsibleFilterError('Invalid value for epoch value (%s)' % second)
return time.strftime(string_format, timefn(second))
def quote(a):
""" return its argument quoted for shell usage """
if a is None:
a = u''
return shlex.quote(to_text(a))
def fileglob(pathname):
""" return list of matched regular files for glob """
return [g for g in glob.glob(pathname) if os.path.isfile(g)]
def regex_replace(value='', pattern='', replacement='', ignorecase=False, multiline=False, count=0, mandatory_count=0):
""" Perform a `re.sub` returning a string """
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
(output, subs) = _re.subn(replacement, value, count=count)
if mandatory_count and mandatory_count != subs:
raise AnsibleFilterError("'%s' should match %d times, but matches %d times in '%s'"
% (pattern, mandatory_count, count, value))
return output
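# Illustrative examples (added sketch) of the filter registered below as
# 'regex_replace':
#
#   regex_replace('ansible', '^a.*i(.*)$', 'a\\1')   -> 'able'
#   regex_replace('foo=bar=baz', '=', ':', count=1)  -> 'foo:bar=baz'
#
# mandatory_count=N raises AnsibleFilterError unless exactly N substitutions
# were made.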
def regex_findall(value, regex, multiline=False, ignorecase=False):
""" Perform re.findall and return the list of matches """
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
""" Perform re.search and return the list of matches or a backref """
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
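# Illustrative examples (added sketch): positional args select capture groups,
# either numeric ('\\1') or named ('\\g<name>') backreferences; with no args
# the whole match is returned, and a failed search returns None.
#
#   regex_search('foo=bar', r'foo=(\w+)', '\\1')          -> ['bar']
#   regex_search('foo=bar', r'foo=(?P<v>\w+)', '\\g<v>')  -> ['bar']
#   regex_search('foo=bar', r'nope')                      -> None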
def ternary(value, true_val, false_val, none_val=None):
""" value ? true_val : false_val """
if value is None and none_val is not None:
return none_val
elif bool(value):
return true_val
else:
return false_val
def regex_escape(string, re_type='python'):
"""Escape all regular expressions special characters from STRING."""
string = to_text(string, errors='surrogate_or_strict', nonstring='simplerepr')
if re_type == 'python':
return re.escape(string)
elif re_type == 'posix_basic':
# list of BRE special chars:
# https://en.wikibooks.org/wiki/Regular_Expressions/POSIX_Basic_Regular_Expressions
return regex_replace(string, r'([].[^$*\\])', r'\\\1')
# TODO: implement posix_extended
# It's similar to, but different from python regex, which is similar to,
# but different from PCRE. It's possible that re.escape would work here.
# https://remram44.github.io/regex-cheatsheet/regex.html#programs
elif re_type == 'posix_extended':
raise AnsibleFilterError('Regex type (%s) not yet implemented' % re_type)
else:
raise AnsibleFilterError('Invalid regex type (%s)' % re_type)
def from_yaml(data):
if isinstance(data, string_types):
# The ``text_type`` call here strips any custom
# string wrapper class, so that CSafeLoader can
# read the data
return yaml_load(text_type(to_text(data, errors='surrogate_or_strict')))
return data
def from_yaml_all(data):
if isinstance(data, string_types):
# The ``text_type`` call here strips any custom
# string wrapper class, so that CSafeLoader can
# read the data
return yaml_load_all(text_type(to_text(data, errors='surrogate_or_strict')))
return data
@pass_environment
def rand(environment, end, start=None, step=None, seed=None):
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise AnsibleFilterError('random can only be used on sequences and integers')
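# Illustrative behaviour (added note): 'env' is the Jinja2 environment
# injected by @pass_environment.
#
#   rand(env, 10)                   -> SystemRandom integer from range(0, 10)
#   rand(env, 10, start=5, step=2)  -> one of 5, 7, 9
#   rand(env, ['a', 'b', 'c'])      -> a random element of the list
#   rand(env, 10, seed='x')         -> reproducible, uses Random('x')
#
# Passing start/step together with a sequence raises AnsibleFilterError.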
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except Exception:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try:
h = hashlib.new(hashtype)
except Exception as e:
# hash is not supported?
raise AnsibleFilterError(e)
h.update(to_bytes(data, errors='surrogate_or_strict'))
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None, salt_size=None, rounds=None, ident=None):
passlib_mapping = {
'md5': 'md5_crypt',
'blowfish': 'bcrypt',
'sha256': 'sha256_crypt',
'sha512': 'sha512_crypt',
}
hashtype = passlib_mapping.get(hashtype, hashtype)
unknown_passlib_hashtype = False
if PASSLIB_AVAILABLE and hashtype not in passlib_mapping and hashtype not in passlib_mapping.values():
unknown_passlib_hashtype = True
display.deprecated(
f"Checking for unsupported password_hash passlib hashtype '{hashtype}'. "
"This will be an error in the future as all supported hashtypes must be documented.",
version='2.19'
)
try:
return do_encrypt(password, hashtype, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
except AnsibleError as e:
reraise(AnsibleFilterError, AnsibleFilterError(to_native(e), orig_exc=e), sys.exc_info()[2])
except Exception as e:
if unknown_passlib_hashtype:
# This can occur if passlib.hash has the hashtype attribute, but it has a different signature than the valid choices.
# In 2.19 this will replace the deprecation warning above and the extra exception handling can be deleted.
choices = ', '.join(passlib_mapping)
raise AnsibleFilterError(f"{hashtype} is not in the list of supported passlib algorithms: {choices}") from e
raise
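# Illustrative usage (added sketch): friendly names are mapped onto passlib
# scheme names before do_encrypt() is called; output is a crypt-style string
# such as '$6$<salt>$<digest>', whose exact value depends on the salt and on
# whether passlib is installed.
#
#   get_encrypted_password('secret')                       # sha512_crypt
#   get_encrypted_password('secret', hashtype='blowfish')  # bcrypt
#   get_encrypted_password('secret', salt_size=16, rounds=10000)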
def to_uuid(string, namespace=UUID_NAMESPACE_ANSIBLE):
uuid_namespace = namespace
if not isinstance(uuid_namespace, uuid.UUID):
try:
uuid_namespace = uuid.UUID(namespace)
except (AttributeError, ValueError) as e:
raise AnsibleFilterError("Invalid value '%s' for 'namespace': %s" % (to_native(namespace), to_native(e)))
    # uuid.uuid5() requires bytes on Python 2 and bytes or text on Python 3
return to_text(uuid.uuid5(uuid_namespace, to_native(string, errors='surrogate_or_strict')))
def mandatory(a, msg=None):
"""Make a variable mandatory."""
from jinja2.runtime import Undefined
if isinstance(a, Undefined):
if a._undefined_name is not None:
name = "'%s' " % to_text(a._undefined_name)
else:
name = ''
if msg is not None:
raise AnsibleFilterError(to_native(msg))
raise AnsibleFilterError("Mandatory variable %s not defined." % name)
return a
def combine(*terms, **kwargs):
recursive = kwargs.pop('recursive', False)
list_merge = kwargs.pop('list_merge', 'replace')
if kwargs:
raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments")
# allow the user to do `[dict1, dict2, ...] | combine`
dictionaries = flatten(terms, levels=1)
    # recursively check that every element is defined (for jinja2)
recursive_check_defined(dictionaries)
if not dictionaries:
return {}
if len(dictionaries) == 1:
return dictionaries[0]
    # merge all the dicts so that the dict at the end of the array has precedence
    # over the dict at the beginning.
    # we merge the dicts from the highest to the lowest priority because there is
    # a high probability that the lowest priority dict will be the biggest in size
    # (as the low prio dict will hold the "default" values and the others will be "patches")
    # and merge_hash creates a copy of its first argument.
    # so high/right -> low/left is more efficient than low/left -> high/right
high_to_low_prio_dict_iterator = reversed(dictionaries)
result = next(high_to_low_prio_dict_iterator)
for dictionary in high_to_low_prio_dict_iterator:
result = merge_hash(dictionary, result, recursive, list_merge)
return result
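# Illustrative example (added, not upstream): later dicts win, and
# recursive=True merges nested mappings instead of replacing them wholesale.
#
#   combine({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}})
#   -> {'a': 1, 'b': {'y': 2}}
#   combine({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}}, recursive=True)
#   -> {'a': 1, 'b': {'x': 1, 'y': 2}}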
def comment(text, style='plain', **kw):
# Predefined comment types
comment_styles = {
'plain': {
'decoration': '# '
},
'erlang': {
'decoration': '% '
},
'c': {
'decoration': '// '
},
'cblock': {
'beginning': '/*',
'decoration': ' * ',
'end': ' */'
},
'xml': {
'beginning': '<!--',
'decoration': ' - ',
'end': '-->'
}
}
# Pointer to the right comment type
style_params = comment_styles[style]
if 'decoration' in kw:
prepostfix = kw['decoration']
else:
prepostfix = style_params['decoration']
# Default params
p = {
'newline': '\n',
'beginning': '',
'prefix': (prepostfix).rstrip(),
'prefix_count': 1,
'decoration': '',
'postfix': (prepostfix).rstrip(),
'postfix_count': 1,
'end': ''
}
# Update default params
p.update(style_params)
p.update(kw)
# Compose substrings for the final string
str_beginning = ''
if p['beginning']:
str_beginning = "%s%s" % (p['beginning'], p['newline'])
str_prefix = ''
if p['prefix']:
if p['prefix'] != p['newline']:
str_prefix = str(
"%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
else:
str_prefix = str(
"%s" % (p['newline'])) * int(p['prefix_count'])
str_text = ("%s%s" % (
p['decoration'],
# Prepend each line of the text with the decorator
text.replace(
p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
# Remove trailing spaces when only decorator is on the line
"%s%s" % (p['decoration'], p['newline']),
"%s%s" % (p['decoration'].rstrip(), p['newline']))
str_postfix = p['newline'].join(
[''] + [p['postfix'] for x in range(p['postfix_count'])])
str_end = ''
if p['end']:
str_end = "%s%s" % (p['newline'], p['end'])
# Return the final string
return "%s%s%s%s%s" % (
str_beginning,
str_prefix,
str_text,
str_postfix,
str_end)
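# Illustrative output (added sketch) for the default 'plain' style:
#
#   comment('hello\nworld')
#   ->
#   #
#   # hello
#   # world
#   #
#
# i.e. one bare prefix line, each text line prefixed with the decoration,
# and one bare postfix line; block styles such as 'cblock' and 'xml' add
# their beginning/end markers around the same skeleton.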
@pass_environment
def extract(environment, item, container, morekeys=None):
if morekeys is None:
keys = [item]
elif isinstance(morekeys, list):
keys = [item] + morekeys
else:
keys = [item, morekeys]
value = container
for key in keys:
value = environment.getitem(value, key)
return value
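# Illustrative example (added, not upstream): extract() walks 'container'
# with one key plus any 'morekeys', using the Jinja2 environment's getitem
# so both mappings and sequences work.
#
#   extract(env, 'a', {'a': {'b': 42}}, morekeys='b')  -> 42
#   extract(env, 1, ['x', 'y'])                        -> 'y'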
def b64encode(string, encoding='utf-8'):
return to_text(base64.b64encode(to_bytes(string, encoding=encoding, errors='surrogate_or_strict')))
def b64decode(string, encoding='utf-8'):
return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
def flatten(mylist, levels=None, skip_nulls=True):
ret = []
for element in mylist:
if skip_nulls and element in (None, 'None', 'null'):
# ignore null items
continue
elif is_sequence(element):
if levels is None:
ret.extend(flatten(element, skip_nulls=skip_nulls))
elif levels >= 1:
# decrement as we go down the stack
ret.extend(flatten(element, levels=(int(levels) - 1), skip_nulls=skip_nulls))
else:
ret.append(element)
else:
ret.append(element)
return ret
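# Illustrative example (added sketch): 'levels' bounds the recursion depth,
# and None/'None'/'null' items are dropped unless skip_nulls=False.
#
#   flatten([1, [2, [3, 4]], None])        -> [1, 2, 3, 4]
#   flatten([1, [2, [3, 4]]], levels=1)    -> [1, 2, [3, 4]]
#   flatten([1, None], skip_nulls=False)   -> [1, None]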
def subelements(obj, subelements, skip_missing=False):
"""Accepts a dict or list of dicts, and a dotted accessor and produces a product
of the element and the results of the dotted accessor
>>> obj = [{"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}]
>>> subelements(obj, 'groups')
[({'name': 'alice', 'groups': ['wheel'], 'authorized': ['/tmp/alice/onekey.pub']}, 'wheel')]
"""
if isinstance(obj, dict):
element_list = list(obj.values())
elif isinstance(obj, list):
element_list = obj[:]
else:
raise AnsibleFilterError('obj must be a list of dicts or a nested dict')
if isinstance(subelements, list):
subelement_list = subelements[:]
elif isinstance(subelements, string_types):
subelement_list = subelements.split('.')
else:
raise AnsibleFilterTypeError('subelements must be a list or a string')
results = []
for element in element_list:
values = element
for subelement in subelement_list:
try:
values = values[subelement]
except KeyError:
if skip_missing:
values = []
break
raise AnsibleFilterError("could not find %r key in iterated item %r" % (subelement, values))
except TypeError:
raise AnsibleFilterTypeError("the key %s should point to a dictionary, got '%s'" % (subelement, values))
if not isinstance(values, list):
raise AnsibleFilterTypeError("the key %r should point to a list, got %r" % (subelement, values))
for value in values:
results.append((element, value))
return results
def dict_to_list_of_dict_key_value_elements(mydict, key_name='key', value_name='value'):
""" takes a dictionary and transforms it into a list of dictionaries,
with each having a 'key' and 'value' keys that correspond to the keys and values of the original """
if not isinstance(mydict, Mapping):
raise AnsibleFilterTypeError("dict2items requires a dictionary, got %s instead." % type(mydict))
ret = []
for key in mydict:
ret.append({key_name: key, value_name: mydict[key]})
return ret
def list_of_dict_key_value_elements_to_dict(mylist, key_name='key', value_name='value'):
""" takes a list of dicts with each having a 'key' and 'value' keys, and transforms the list into a dictionary,
effectively as the reverse of dict2items """
if not is_sequence(mylist):
raise AnsibleFilterTypeError("items2dict requires a list, got %s instead." % type(mylist))
try:
return dict((item[key_name], item[value_name]) for item in mylist)
except KeyError:
raise AnsibleFilterTypeError(
"items2dict requires each dictionary in the list to contain the keys '%s' and '%s', got %s instead."
% (key_name, value_name, mylist)
)
except TypeError:
raise AnsibleFilterTypeError("items2dict requires a list of dictionaries, got %s instead." % mylist)
def path_join(paths):
""" takes a sequence or a string, and return a concatenation
of the different members """
if isinstance(paths, string_types):
return os.path.join(paths)
if is_sequence(paths):
return os.path.join(*paths)
raise AnsibleFilterTypeError("|path_join expects string or sequence, got %s instead." % type(paths))
def commonpath(paths):
"""
Retrieve the longest common path from the given list.
:param paths: A list of file system paths.
:type paths: List[str]
:returns: The longest common path.
:rtype: str
"""
if not is_sequence(paths):
raise AnsibleFilterTypeError("|commonpath expects sequence, got %s instead." % type(paths))
return os.path.commonpath(paths)
class FilterModule(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
# base 64
'b64decode': b64decode,
'b64encode': b64encode,
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
'from_yaml_all': from_yaml_all,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'expandvars': partial(unicode_wrap, os.path.expandvars),
'path_join': path_join,
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
'commonpath': commonpath,
'normpath': partial(unicode_wrap, os.path.normpath),
# file glob
'fileglob': fileglob,
# types
'bool': to_bool,
'to_datetime': to_datetime,
# date formatting
'strftime': strftime,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digest of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksumming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# comment-style decoration
'comment': comment,
# debug
'type_debug': lambda o: o.__class__.__name__,
# Data structures
'combine': combine,
'extract': extract,
'flatten': flatten,
'dict2items': dict_to_list_of_dict_key_value_elements,
'items2dict': list_of_dict_key_value_elements_to_dict,
'subelements': subelements,
'split': partial(unicode_wrap, text_type.split),
}
| 23,099 | Python | .py | 556 | 33.561151 | 129 | 0.627851 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,458 | jsonfile.py | ansible_ansible/lib/ansible/plugins/cache/jsonfile.py |
# (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: jsonfile
short_description: JSON formatted files.
description:
- This cache uses JSON formatted, per host, files saved to the filesystem.
version_added: "1.9"
author: Ansible Core (@ansible-core)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the JSON files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
type: path
_prefix:
description: User defined prefix to use when creating the JSON files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
"""
import codecs
import json
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by json files.
"""
def _load(self, filepath):
# Valid JSON is always UTF-8 encoded.
with codecs.open(filepath, 'r', encoding='utf-8') as f:
return json.load(f, cls=AnsibleJSONDecoder)
def _dump(self, value, filepath):
with codecs.open(filepath, 'w', encoding='utf-8') as f:
f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
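# Illustrative configuration (a sketch based on the DOCUMENTATION block above,
# not part of the original file). In ansible.cfg:
#
#   [defaults]
#   fact_caching = jsonfile
#   fact_caching_connection = /tmp/ansible_facts   # hypothetical path
#   fact_caching_prefix = facts_                   # optional
#   fact_caching_timeout = 86400
#
# Each host's facts are then stored as pretty-printed JSON under
# /tmp/ansible_facts/facts_<hostname>.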
| 1,901 | Python | .py | 54 | 27.981481 | 92 | 0.652529 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,459 | memory.py | ansible_ansible/lib/ansible/plugins/cache/memory.py |
# (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: memory
    short_description: RAM backed, non-persistent
description:
- RAM backed cache that is not persistent.
- This is the default used if no other plugin is specified.
- There are no options to configure.
version_added: historical
author: core team (@ansible-core)
"""
from ansible.plugins.cache import BaseCacheModule
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
self._cache = {}
def get(self, key):
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
def keys(self):
return self._cache.keys()
def contains(self, key):
return key in self._cache
def delete(self, key):
del self._cache[key]
def flush(self):
self._cache = {}
def copy(self):
return self._cache.copy()
def __getstate__(self):
return self.copy()
def __setstate__(self, data):
self._cache = data
| 1,218 | Python | .py | 36 | 27.833333 | 92 | 0.64494 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,460 | __init__.py | ansible_ansible/lib/ansible/plugins/cache/__init__.py |
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import copy
import errno
import os
import tempfile
import time
from abc import abstractmethod
from collections.abc import MutableMapping
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.file import S_IRWU_RG_RO
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins import AnsiblePlugin
from ansible.plugins.loader import cache_loader
from ansible.utils.collection_loader import resource_from_fqcr
from ansible.utils.display import Display
display = Display()
class BaseCacheModule(AnsiblePlugin):
# Backwards compat only. Just import the global display instead
_display = display
def __init__(self, *args, **kwargs):
super(BaseCacheModule, self).__init__()
self.set_options(var_options=args, direct=kwargs)
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value):
pass
@abstractmethod
def keys(self):
pass
@abstractmethod
def contains(self, key):
pass
@abstractmethod
def delete(self, key):
pass
@abstractmethod
def flush(self):
pass
@abstractmethod
def copy(self):
pass
class BaseFileCacheModule(BaseCacheModule):
"""
A caching module backed by file based storage.
"""
def __init__(self, *args, **kwargs):
try:
super(BaseFileCacheModule, self).__init__(*args, **kwargs)
self._cache_dir = self._get_cache_connection(self.get_option('_uri'))
self._timeout = float(self.get_option('_timeout'))
except KeyError:
self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self.plugin_name = resource_from_fqcr(self.__module__)
self._cache = {}
self.validate_cache_connection()
def _get_cache_connection(self, source):
if source:
try:
return os.path.expanduser(os.path.expandvars(source))
except TypeError:
pass
def validate_cache_connection(self):
if not self._cache_dir:
raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
"to be set (to a writeable directory path)" % self.plugin_name)
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
except (OSError, IOError) as e:
raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
else:
for x in (os.R_OK, os.W_OK, os.X_OK):
if not os.access(self._cache_dir, x):
raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
self.plugin_name, self._cache_dir))
def _get_cache_file_name(self, key):
prefix = self.get_option('_prefix')
if prefix:
cachefile = "%s/%s%s" % (self._cache_dir, prefix, key)
else:
cachefile = "%s/%s" % (self._cache_dir, key)
return cachefile
def get(self, key):
""" This checks the in memory cache first as the fact was not expired at 'gather time'
and it would be problematic if the key did expire after some long running tasks and
user gets 'undefined' error in the same play """
if key not in self._cache:
if self.has_expired(key) or key == "":
raise KeyError
cachefile = self._get_cache_file_name(key)
try:
value = self._load(cachefile)
self._cache[key] = value
except ValueError as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s. "
"Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
self.delete(key)
raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
"It has been removed, so you can re-run your command now." % cachefile)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
raise KeyError
except Exception as e:
raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
cachefile = self._get_cache_file_name(key)
tmpfile_handle, tmpfile_path = tempfile.mkstemp(dir=os.path.dirname(cachefile))
try:
try:
self._dump(value, tmpfile_path)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to write to '%s' : %s" % (self.plugin_name, tmpfile_path, to_bytes(e)))
try:
os.rename(tmpfile_path, cachefile)
os.chmod(cachefile, mode=S_IRWU_RG_RO)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to move '%s' to '%s' : %s" % (self.plugin_name, tmpfile_path, cachefile, to_bytes(e)))
finally:
try:
os.unlink(tmpfile_path)
except OSError:
pass
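    # Note (added): set() stages the data in a tempfile.mkstemp() file created
    # in the cache directory and then os.rename()s it over the real cache
    # file, so concurrent readers never observe a half-written entry; the
    # trailing unlink in the finally block is a no-op once the rename
    # succeeded.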
def has_expired(self, key):
if self._timeout == 0:
return False
cachefile = self._get_cache_file_name(key)
try:
st = os.stat(cachefile)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
return False
if time.time() - st.st_mtime <= self._timeout:
return False
if key in self._cache:
del self._cache[key]
return True
def keys(self):
# When using a prefix we must remove it from the key name before
# checking the expiry and returning it to the caller. Keys that do not
# share the same prefix cannot be fetched from the cache.
prefix = self.get_option('_prefix')
prefix_length = len(prefix)
keys = []
for k in os.listdir(self._cache_dir):
if k.startswith('.') or not k.startswith(prefix):
continue
k = k[prefix_length:]
if not self.has_expired(k):
keys.append(k)
return keys
def contains(self, key):
cachefile = self._get_cache_file_name(key)
if key in self._cache:
return True
if self.has_expired(key):
return False
try:
os.stat(cachefile)
return True
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
def delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
os.remove(self._get_cache_file_name(key))
except (OSError, IOError):
pass # TODO: only pass on non existing?
def flush(self):
self._cache = {}
for key in self.keys():
self.delete(key)
def copy(self):
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
@abstractmethod
def _load(self, filepath):
"""
Read data from a filepath and return it as a value
:arg filepath: The filepath to read from.
:returns: The value stored in the filepath
This method reads from the file on disk and takes care of any parsing
and transformation of the data before returning it. The value
returned should be what Ansible would expect if it were uncached data.
.. note:: Filehandles have advantages but calling code doesn't know
whether this file is text or binary, should be decoded, or accessed via
a library function. Therefore the API uses a filepath and opens
the file inside of the method.
"""
pass
@abstractmethod
def _dump(self, value, filepath):
"""
Write data to a filepath
:arg value: The value to store
:arg filepath: The filepath to store it at
"""
pass
class CachePluginAdjudicator(MutableMapping):
"""
Intermediary between a cache dictionary and a CacheModule
"""
def __init__(self, plugin_name='memory', **kwargs):
self._cache = {}
self._retrieved = {}
self._plugin = cache_loader.get(plugin_name, **kwargs)
if not self._plugin:
raise AnsibleError('Unable to load the cache plugin (%s).' % plugin_name)
self._plugin_name = plugin_name
def update_cache_if_changed(self):
if self._retrieved != self._cache:
self.set_cache()
def set_cache(self):
for top_level_cache_key in self._cache.keys():
self._plugin.set(top_level_cache_key, self._cache[top_level_cache_key])
self._retrieved = copy.deepcopy(self._cache)
def load_whole_cache(self):
for key in self._plugin.keys():
self._cache[key] = self._plugin.get(key)
def __repr__(self):
return to_text(self._cache)
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def _do_load_key(self, key):
load = False
if all([
key not in self._cache,
key not in self._retrieved,
self._plugin_name != 'memory',
self._plugin.contains(key),
]):
load = True
return load
def __getitem__(self, key):
if self._do_load_key(key):
try:
self._cache[key] = self._plugin.get(key)
except KeyError:
pass
else:
self._retrieved[key] = self._cache[key]
return self._cache[key]
def get(self, key, default=None):
if self._do_load_key(key):
try:
self._cache[key] = self._plugin.get(key)
except KeyError as e:
pass
else:
self._retrieved[key] = self._cache[key]
return self._cache.get(key, default)
def items(self):
return self._cache.items()
def values(self):
return self._cache.values()
def keys(self):
return self._cache.keys()
def pop(self, key, *args):
if args:
return self._cache.pop(key, args[0])
return self._cache.pop(key)
def __delitem__(self, key):
del self._cache[key]
def __setitem__(self, key, value):
self._cache[key] = value
def flush(self):
self._plugin.flush()
self._cache = {}
def update(self, value):
self._cache.update(value)
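# Illustrative usage (added sketch; constructor kwargs are passed through to
# the named cache plugin, so the '_uri' below assumes a file-backed plugin
# such as jsonfile and the path is hypothetical):
#
#   cache = CachePluginAdjudicator(plugin_name='jsonfile', _uri='/tmp/demo_cache')
#   cache['host1'] = {'fact': 1}     # staged in the in-memory dict only
#   cache.set_cache()                # persisted through the plugin
#   cache.update_cache_if_changed()  # no-op until _cache diverges again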
| 12,303 | Python | .py | 305 | 30.744262 | 159 | 0.596462 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,461 | base.py | ansible_ansible/lib/ansible/plugins/cache/base.py |
# (c) 2017, ansible by Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
# moved actual classes to __init__ kept here for backward compat with 3rd parties
from ansible.plugins.cache import BaseCacheModule, BaseFileCacheModule # pylint: disable=unused-import
| 907 | Python | .py | 19 | 46.684211 | 103 | 0.78354 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,462 | url_windows.py | ansible_ansible/lib/ansible/plugins/doc_fragments/url_windows.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment:
# Common options for Ansible.ModuleUtils.WebRequest
DOCUMENTATION = r"""
options:
method:
description:
- The HTTP Method of the request.
type: str
follow_redirects:
description:
    - Whether or not the module should follow redirects.
    - V(all) will follow all redirects.
- V(none) will not follow any redirect.
- V(safe) will follow only "safe" redirects, where "safe" means that the
client is only doing a C(GET) or C(HEAD) on the URI to which it is being
redirected.
- When following a redirected URL, the C(Authorization) header and any
credentials set will be dropped and not redirected.
choices:
- all
- none
- safe
default: safe
type: str
headers:
description:
- Extra headers to set on the request.
- This should be a dictionary where the key is the header name and the
value is the value for that header.
type: dict
http_agent:
description:
- Header to identify as, generally appears in web server logs.
- This is set to the C(User-Agent) header on a HTTP request.
default: ansible-httpget
type: str
maximum_redirection:
description:
- Specify how many times the module will redirect a connection to an
alternative URI before the connection fails.
    - If set to V(0), or if O(follow_redirects) is set to V(none), or to V(safe) when
      not doing a C(GET) or C(HEAD), it prevents all redirection.
default: 50
type: int
timeout:
description:
- Specifies how long the request can be pending before it times out (in
seconds).
- Set to V(0) to specify an infinite timeout.
default: 30
type: int
validate_certs:
description:
- If V(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed
certificates.
default: yes
type: bool
client_cert:
description:
- The path to the client certificate C(.pfx) that is used for X509
authentication. This path can either be the path to the C(.pfx) on the
filesystem or the PowerShell certificate path
C(Cert:\CurrentUser\My\<thumbprint>).
    - If the certificate file is not password protected, the WinRM connection
      must be authenticated with C(CredSSP) or C(become) must be used on the task.
- Other authentication types can set O(client_cert_password) when the cert
is password protected.
type: str
client_cert_password:
description:
- The password for O(client_cert) if the cert is password protected.
type: str
force_basic_auth:
description:
    - By default the authentication header is only sent when a webservice
      responds to an initial request with a 401 status. Since some basic auth
      services do not properly send a 401, logins will fail.
- This option forces the sending of the Basic authentication header upon
the original request.
default: no
type: bool
url_username:
description:
- The username to use for authentication.
type: str
url_password:
description:
- The password for O(url_username).
type: str
use_default_credential:
description:
- Uses the current user's credentials when authenticating with a server
protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication.
- Sites that use C(Basic) auth will still require explicit credentials
through the O(url_username) and O(url_password) options.
- The module will only have access to the user's credentials if using
C(become) with a password, you are connecting with SSH using a password,
or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation).
- If not using C(become) or a different auth method to the ones stated
above, there will be no default credentials available and no
authentication will occur.
default: no
type: bool
use_proxy:
description:
- If V(no), it will not use the proxy defined in IE for the current user.
default: yes
type: bool
proxy_url:
description:
- An explicit proxy to use for the request.
- By default, the request will use the IE defined proxy unless O(use_proxy=no).
type: str
proxy_username:
description:
- The username to use for proxy authentication.
type: str
proxy_password:
description:
- The password for O(proxy_username).
type: str
proxy_use_default_credential:
description:
- Uses the current user's credentials when authenticating with a proxy host
protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication.
- Proxies that use C(Basic) auth will still require explicit credentials
through the O(proxy_username) and O(proxy_password) options.
- The module will only have access to the user's credentials if using
C(become) with a password, you are connecting with SSH using a password,
or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation).
- If not using C(become) or a different auth method to the ones stated
above, there will be no default credentials available and no proxy
authentication will occur.
default: no
type: bool
seealso:
- module: community.windows.win_inet_proxy
"""
| 5,491 | Python | .py | 143 | 33.475524 | 92 | 0.716451 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,463 | vars_plugin_staging.py | ansible_ansible/lib/ansible/plugins/doc_fragments/vars_plugin_staging.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
stage:
description:
- Control when this vars plugin may be executed.
- Setting this option to V(all) will run the vars plugin after importing inventory and whenever it is demanded by a task.
- Setting this option to V(task) will only run the vars plugin whenever it is demanded by a task.
- Setting this option to V(inventory) will only run the vars plugin after parsing inventory.
- If this option is omitted, the global C(RUN_VARS_PLUGINS) configuration is used to determine when to execute the vars plugin.
choices: ['all', 'task', 'inventory']
version_added: "2.10"
type: str
"""
| 896 | Python | .py | 18 | 45.611111 | 133 | 0.718213 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,464 | action_core.py | ansible_ansible/lib/ansible/plugins/doc_fragments/action_core.py |
# -*- coding: utf-8 -*-
# Copyright: (c) , Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
# WARNING: this is mostly here as a convenience for documenting core behaviours, no plugin outside of ansible-core should use this file
class ModuleDocFragment(object):
# requires action_common
DOCUMENTATION = r"""
attributes:
async:
support: none
become:
support: none
bypass_task_loop:
description: These tasks ignore the C(loop) and C(with_) keywords
core:
description: This is a 'core engine' feature and is not implemented like most task actions, so it is not overridable in any way via the plugin system.
support: full
connection:
support: none
ignore_conditional:
support: none
description: The action is not subject to conditional execution so it will ignore the C(when:) keyword
platform:
support: full
platforms: all
until:
description: Denotes if this action obeys until/retry/poll keywords
support: full
tags:
description: Allows for the 'tags' keyword to control the selection of this action for execution
support: full
"""
# also requires core above
IMPORT = r"""
attributes:
action:
details: While this action executes locally on the controller it is not governed by an action plugin
support: none
bypass_host_loop:
details: While the import can be host specific and runs per host it is not dealing with all available host variables,
use an include instead for those cases
support: partial
bypass_task_loop:
details: The task itself is not looped, but the loop is applied to each imported task
support: partial
delegation:
details: Since there are no connection nor facts, there is no sense in delegating imports
support: none
ignore_conditional:
details: While the action itself will ignore the conditional, it will be inherited by the imported tasks themselves
support: partial
tags:
details: Tags are not interpreted for this action, they are applied to the imported tasks
support: none
until:
support: none
"""
# also requires core above
INCLUDE = r"""
attributes:
action:
details: While this action executes locally on the controller it is not governed by an action plugin
support: none
bypass_host_loop:
support: none
bypass_task_loop:
support: none
delegation:
details: Since there are no connection nor facts, there is no sense in delegating includes
support: none
tags:
details: Tags are interpreted by this action but are not automatically inherited by the include tasks, see C(apply)
support: partial
"""
| 2,855 | Python | .py | 75 | 32.573333 | 156 | 0.714337 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,465 | backup.py | ansible_ansible/lib/ansible/plugins/doc_fragments/backup.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Ansible, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = r"""
options:
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
"""
| 507 | Python | .py | 15 | 30.4 | 92 | 0.72541 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,466 | decrypt.py | ansible_ansible/lib/ansible/plugins/doc_fragments/decrypt.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Brian Coca <bcoca@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""
options:
decrypt:
description:
- This option controls the auto-decryption of source files using vault.
type: bool
default: yes
version_added: '2.4'
"""
| 487 | Python | .py | 15 | 29.066667 | 92 | 0.711538 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,467 | connection_pipelining.py | ansible_ansible/lib/ansible/plugins/doc_fragments/connection_pipelining.py |
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# common shelldocumentation fragment
DOCUMENTATION = """
options:
pipelining:
default: false
description:
- Pipelining reduces the number of connection operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfers.
- This can result in a very significant performance improvement when enabled.
- However this can conflict with privilege escalation (C(become)).
For example, when using sudo operations you must first disable C(requiretty) in the sudoers file for the target hosts,
which is why this feature is disabled by default.
env:
- name: ANSIBLE_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
type: boolean
vars:
- name: ansible_pipelining
"""
| 1,182 | Python | .py | 27 | 34.296296 | 132 | 0.653646 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,468 | url.py | ansible_ansible/lib/ansible/plugins/doc_fragments/url.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, John Barker <gundalow@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
type: str
force:
description:
- If V(yes) do not get a cached copy.
type: bool
default: no
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
use_proxy:
description:
- If V(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
validate_certs:
description:
- If V(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: yes
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without O(url_password) for sites that allow empty passwords.
type: str
url_password:
description:
- The password for use in HTTP basic authentication.
- If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used.
type: str
force_basic_auth:
description:
- Credentials specified with O(url_username) and O(url_password) should be passed in HTTP Header.
type: bool
default: no
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, O(client_key) is not required.
type: path
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If O(client_cert) contains both the certificate and key, this option is not required.
type: path
use_gssapi:
description:
- Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with O(url_username)/O(url_password) or with the GSSAPI env var
      E(KRB5CCNAME) that specifies a custom Kerberos credential cache.
- NTLM authentication is B(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
"""
| 2,799 | Python | .py | 70 | 34.985714 | 113 | 0.712661 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,469 | checksum_common.py | ansible_ansible/lib/ansible/plugins/doc_fragments/checksum_common.py |
# Copyright (c) 2024 ShIRann Chen <shirannx@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
checksum_algorithm:
description:
- Algorithm to determine checksum of file.
- Will throw an error if the host is unable to use specified algorithm.
- The remote host has to support the hashing method specified, V(md5)
can be unavailable if the host is FIPS-140 compliant.
    - Availability might be restricted by the target system, for example FIPS systems won't allow md5 use.
type: str
choices: [ md5, sha1, sha224, sha256, sha384, sha512 ]
default: sha1
aliases: [ checksum, checksum_algo ]
version_added: "2.0"
get_checksum:
description:
- Whether to return a checksum of the file.
type: bool
default: yes
"""
| 995 | Python | .py | 24 | 34.666667 | 109 | 0.673554 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,470 | default_callback.py | ansible_ansible/lib/ansible/plugins/doc_fragments/default_callback.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
display_skipped_hosts:
name: Show skipped hosts
description: "Toggle to control displaying skipped task/host results in a task."
type: bool
default: yes
env:
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- key: display_skipped_hosts
section: defaults
display_ok_hosts:
name: Show 'ok' hosts
description: "Toggle to control displaying 'ok' task/host results in a task."
type: bool
default: yes
env:
- name: ANSIBLE_DISPLAY_OK_HOSTS
ini:
- key: display_ok_hosts
section: defaults
version_added: '2.7'
display_failed_stderr:
name: Use STDERR for failed and unreachable tasks
description: "Toggle to control whether failed and unreachable tasks are displayed to STDERR rather than STDOUT."
type: bool
default: no
env:
- name: ANSIBLE_DISPLAY_FAILED_STDERR
ini:
- key: display_failed_stderr
section: defaults
version_added: '2.7'
show_custom_stats:
name: Show custom stats
description: 'This adds the custom stats set via the set_stats plugin to the play recap.'
type: bool
default: no
env:
- name: ANSIBLE_SHOW_CUSTOM_STATS
ini:
- key: show_custom_stats
section: defaults
show_per_host_start:
name: Show per host task start
description: 'This adds output that shows when a task starts to execute for each host.'
type: bool
default: no
env:
- name: ANSIBLE_SHOW_PER_HOST_START
ini:
- key: show_per_host_start
section: defaults
version_added: '2.9'
check_mode_markers:
name: Show markers when running in check mode
description:
- Toggle to control displaying markers when running in check mode.
- "The markers are C(DRY RUN) at the beginning and ending of playbook execution (when calling C(ansible-playbook --check))
and C(CHECK MODE) as a suffix at every play and task that is run in check mode."
type: bool
default: no
version_added: '2.9'
env:
- name: ANSIBLE_CHECK_MODE_MARKERS
ini:
- key: check_mode_markers
section: defaults
show_task_path_on_failure:
name: Show file path on failed tasks
description:
- When a task fails, display the path to the file containing the failed task and the line number.
This information is displayed automatically for every task when running with C(-vv) or greater verbosity.
type: bool
default: no
env:
- name: ANSIBLE_SHOW_TASK_PATH_ON_FAILURE
ini:
- key: show_task_path_on_failure
section: defaults
version_added: '2.11'
"""
| 3,212 | Python | .py | 88 | 27.522727 | 130 | 0.613782 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,471 | inventory_cache.py | ansible_ansible/lib/ansible/plugins/doc_fragments/inventory_cache.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# inventory cache
DOCUMENTATION = r"""
options:
cache:
description:
- Toggle to enable/disable the caching of the inventory's source data, requires a cache plugin setup to work.
type: bool
default: no
env:
- name: ANSIBLE_INVENTORY_CACHE
ini:
- section: inventory
key: cache
cache_plugin:
description:
- Cache plugin to use for the inventory's source data.
type: str
default: memory
env:
- name: ANSIBLE_CACHE_PLUGIN
- name: ANSIBLE_INVENTORY_CACHE_PLUGIN
ini:
- section: defaults
key: fact_caching
- section: inventory
key: cache_plugin
cache_timeout:
description:
- Cache duration in seconds.
default: 3600
type: int
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
- name: ANSIBLE_INVENTORY_CACHE_TIMEOUT
ini:
- section: defaults
key: fact_caching_timeout
- section: inventory
key: cache_timeout
cache_connection:
description:
- Cache connection data or path, read cache plugin documentation for specifics.
type: str
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
- name: ANSIBLE_INVENTORY_CACHE_CONNECTION
ini:
- section: defaults
key: fact_caching_connection
- section: inventory
key: cache_connection
cache_prefix:
description:
- Prefix to use for cache plugin files/tables.
default: ansible_inventory_
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
- name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX
ini:
- section: defaults
key: fact_caching_prefix
- section: inventory
key: cache_prefix
"""
| 1,928 | Python | .py | 69 | 22.246377 | 115 | 0.663612 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,472 | template_common.py | ansible_ansible/lib/ansible/plugins/doc_fragments/template_common.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
    # Standard template documentation fragment, used by template and win_template.
DOCUMENTATION = r"""
description:
- Templates are processed by the L(Jinja2 templating language,http://jinja.pocoo.org/docs/).
- Documentation on the template formatting can be found in the
L(Template Designer Documentation,http://jinja.pocoo.org/docs/templates/).
- Additional variables listed below can be used in templates.
- C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
- C(template_host) contains the node name of the template's machine.
- C(template_uid) is the numeric user id of the owner.
- C(template_path) is the path of the template.
- C(template_fullpath) is the absolute path of the template.
- C(template_destpath) is the path of the template on the remote system (added in 2.8).
- C(template_run_date) is the date that the template was rendered.
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller.
- This can be a relative or an absolute path.
- The file must be encoded with C(utf-8) but O(output_encoding) can be used to control the encoding of the output
template.
type: path
required: yes
dest:
description:
- Location to render the template to on the remote machine.
type: path
required: yes
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
type: str
choices: [ '\n', '\r', '\r\n' ]
default: '\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
type: str
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
type: str
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
type: str
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
type: str
default: '}}'
version_added: '2.4'
comment_start_string:
description:
- The string marking the beginning of a comment statement.
type: str
version_added: '2.12'
comment_end_string:
description:
- The string marking the end of a comment statement.
type: str
version_added: '2.12'
trim_blocks:
description:
- Determine when newlines should be removed from blocks.
- When set to V(yes) the first newline after a block is removed (block, not variable tag!).
type: bool
default: yes
version_added: '2.4'
lstrip_blocks:
description:
- Determine when leading spaces and tabs should be stripped.
- When set to V(yes) leading spaces and tabs are stripped from the start of a line to a block.
type: bool
default: no
version_added: '2.6'
force:
description:
    - Determine whether the file is transferred if the destination already exists.
- When set to C(yes), replace the remote file when contents are different than the source.
- When set to C(no), the file will only be transferred if the destination does not exist.
type: bool
default: yes
output_encoding:
description:
- Overrides the encoding used to write the template file defined by O(dest).
- It defaults to C(utf-8), but any encoding supported by python can be used.
- The source template file must always be encoded using C(utf-8), for homogeneity.
type: str
default: utf-8
version_added: '2.7'
notes:
- Including a string that uses a date in the template will result in the template being marked 'changed' each time.
- Since Ansible 0.9, templates are loaded with O(trim_blocks=True).
- >
    Also, you can override Jinja2 settings by adding a special header to the template file,
    that is C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False),
    which changes the variable interpolation markers to C([% var %]) instead of C({{ var }}).
    This is the best way to prevent evaluation of things that look like Jinja2 but should not be.
- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>)
on Linux.
"""
| 4,634 | Python | .py | 115 | 36.382609 | 120 | 0.717102 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,473 | constructed.py | ansible_ansible/lib/ansible/plugins/doc_fragments/constructed.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
strict:
description:
- If V(yes) make invalid entries a fatal error, otherwise skip and continue.
- Since it is possible to use facts in the expressions they might not always be available
and we ignore those errors by default.
type: bool
default: no
compose:
description: Create vars from jinja2 expressions.
type: dict
default: {}
groups:
description: Add hosts to group based on Jinja2 conditionals.
type: dict
default: {}
keyed_groups:
description: Add hosts to group based on the values of a variable.
type: list
default: []
elements: dict
suboptions:
parent_group:
type: str
description: parent group for keyed group.
prefix:
type: str
description: A keyed group name will start with this prefix.
default: ''
separator:
type: str
description: separator used to build the keyed group name.
default: "_"
key:
type: str
description:
- The key from input dictionary used to generate groups.
default_value:
description:
- The default value when the host variable's value is an empty string.
- This option is mutually exclusive with O(keyed_groups[].trailing_separator).
type: str
version_added: '2.12'
trailing_separator:
description:
- Set this option to V(false) to omit the O(keyed_groups[].separator) after the host variable when the value is an empty string.
- This option is mutually exclusive with O(keyed_groups[].default_value).
type: bool
default: true
version_added: '2.12'
use_extra_vars:
version_added: '2.11'
description: Merge extra vars into the available variables for composition (highest precedence).
type: bool
default: false
ini:
- section: inventory_plugins
key: use_extra_vars
env:
- name: ANSIBLE_INVENTORY_USE_EXTRA_VARS
leading_separator:
description:
- Use in conjunction with O(keyed_groups).
- By default, a keyed group that does not have a prefix or a separator provided will have a name that starts with an underscore.
- This is because the default prefix is V("") and the default separator is V("_").
- Set this option to V(false) to omit the leading underscore (or other separator) if no prefix is given.
- If the group name is derived from a mapping the separator is still used to concatenate the items.
- To not use a separator in the group name at all, set the separator for the keyed group to an empty string instead.
type: boolean
default: True
version_added: '2.11'
"""
| 2,973 | Python | .py | 78 | 31.858974 | 136 | 0.677966 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,474 | validate.py | ansible_ansible/lib/ansible/plugins/doc_fragments/validate.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Ansible, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = r"""
options:
validate:
description:
- The validation command to run before copying the updated file into the final destination.
- A temporary file path is used to validate, passed in through C(%s) which must be present as in the examples below.
- Also, the command is passed securely so shell features such as expansion and pipes will not work.
- For an example on how to handle more complex validation than what this
option provides, see R(handling complex validation,complex_configuration_validation).
type: str
"""
| 833 | Python | .py | 17 | 45.470588 | 120 | 0.746617 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,475 | return_common.py | ansible_ansible/lib/ansible/plugins/doc_fragments/return_common.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
RETURN = r"""
changed:
  description: Whether the module effected changes on the target.
returned: always
type: bool
sample: false
failed:
description: Whether the module failed to execute.
returned: always
type: bool
sample: true
msg:
description: Human-readable message.
returned: as needed
type: str
sample: all ok
skipped:
description: Whether the module was skipped.
returned: always
type: bool
sample: false
results:
description: List of module results.
returned: when using a loop.
type: list
sample: [{changed: True, msg: 'first item changed'}, {changed: False, msg: 'second item ok'}]
exception:
description: Optional information from a handled error.
returned: on some errors
type: str
sample: Unknown error
"""
| 1,034 | Python | .py | 38 | 24.657895 | 95 | 0.747231 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,476 | shell_windows.py | ansible_ansible/lib/ansible/plugins/doc_fragments/shell_windows.py |
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Windows shell documentation fragment
    # FIXME: set_module_language doesn't belong here but must be set so it doesn't fail when someone
    # calls get_option('set_module_language') on this plugin
DOCUMENTATION = r"""
options:
async_dir:
description:
- Directory in which ansible will keep async job information.
- Before Ansible 2.8, this was set to C(remote_tmp + "\.ansible_async").
default: '%USERPROFILE%\.ansible_async'
ini:
- section: powershell
key: async_dir
vars:
- name: ansible_async_dir
version_added: '2.8'
remote_tmp:
description:
- Temporary directory to use on targets when copying files to the host.
default: '%TEMP%'
ini:
- section: powershell
key: remote_tmp
vars:
- name: ansible_remote_tmp
set_module_language:
description:
- Controls if we set the locale for modules when executing on the
target.
- Windows only supports V(no) as an option.
type: bool
default: 'no'
choices: ['no', False]
"""
| 1,234 | Python | .py | 38 | 28.026316 | 98 | 0.691534 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,477 | files.py | ansible_ansible/lib/ansible/plugins/doc_fragments/files.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
# Note: mode is overridden by the copy and template modules so if you change the description
# here, you should also change it there.
DOCUMENTATION = r"""
options:
mode:
description:
- The permissions the resulting filesystem object should have.
- For those used to C(/usr/bin/chmod) remember that modes are actually octal numbers.
You must give Ansible enough information to parse them correctly.
For consistent results, quote octal numbers (for example, V('644') or V('1777')) so Ansible receives
a string and can do its own conversion from string into number.
Adding a leading zero (for example, V(0755)) works sometimes, but can fail in loops and some other circumstances.
- Giving Ansible a number without following either of these rules will end up with a decimal
number which will have unexpected results.
- As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, V(u+rwx) or
V(u=rw,g=r,o=r)).
- If O(mode) is not specified and the destination filesystem object B(does not) exist, the default C(umask) on the system will be used
when setting the mode for the newly created filesystem object.
- If O(mode) is not specified and the destination filesystem object B(does) exist, the mode of the existing filesystem object will be used.
- Specifying O(mode) is the best way to ensure filesystem objects are created with the correct permissions.
See CVE-2020-1736 for further details.
type: raw
owner:
description:
- Name of the user that should own the filesystem object, as would be fed to C(chown).
- When left unspecified, it uses the current user unless you are root, in which
case it can preserve the previous ownership.
- Specifying a numeric username will be assumed to be a user ID and not a username. Avoid numeric usernames to avoid this confusion.
type: str
group:
description:
- Name of the group that should own the filesystem object, as would be fed to C(chown).
- When left unspecified, it uses the current group of the current user unless you are root,
in which case it can preserve the previous ownership.
type: str
seuser:
description:
- The user part of the SELinux filesystem object context.
- By default it uses the V(system) policy, where applicable.
- When set to V(_default), it will use the C(user) portion of the policy if available.
type: str
serole:
description:
- The role part of the SELinux filesystem object context.
- When set to V(_default), it will use the C(role) portion of the policy if available.
type: str
setype:
description:
- The type part of the SELinux filesystem object context.
- When set to V(_default), it will use the C(type) portion of the policy if available.
type: str
selevel:
description:
- The level part of the SELinux filesystem object context.
- This is the MLS/MCS attribute, sometimes known as the C(range).
- When set to V(_default), it will use the C(level) portion of the policy if available.
type: str
unsafe_writes:
description:
- Influence when to use atomic operation to prevent data corruption or inconsistent reads from the target filesystem object.
- By default this module uses atomic operations to prevent data corruption or inconsistent reads from the target filesystem objects,
but sometimes systems are configured or just broken in ways that prevent this. One example is docker mounted filesystem objects,
which cannot be updated atomically from inside the container and can only be written in an unsafe manner.
- This option allows Ansible to fall back to unsafe methods of updating filesystem objects when atomic operations fail
(however, it doesn't force Ansible to perform unsafe writes).
- IMPORTANT! Unsafe writes are subject to race conditions and can lead to data corruption.
type: bool
default: no
version_added: '2.2'
attributes:
description:
- The attributes the resulting filesystem object should have.
- To get supported flags look at the man page for C(chattr) on the target system.
- This string should contain the attributes in the same order as the one displayed by C(lsattr).
- The C(=) operator is assumed as default, otherwise C(+) or C(-) operators need to be included in the string.
type: str
aliases: [ attr ]
version_added: '2.3'
"""
| 4,760 | Python | .py | 85 | 51.164706 | 143 | 0.736346 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,478 | shell_common.py | ansible_ansible/lib/ansible/plugins/doc_fragments/shell_common.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
    # common shell documentation fragment
DOCUMENTATION = """
options:
remote_tmp:
description:
- Temporary directory to use on targets when executing tasks.
default: '~/.ansible/tmp'
env: [{name: ANSIBLE_REMOTE_TEMP}, {name: ANSIBLE_REMOTE_TMP}]
ini:
- section: defaults
key: remote_tmp
vars:
- name: ansible_remote_tmp
common_remote_group:
name: Enables changing the group ownership of temporary files and directories
default: null
description:
- Checked when Ansible needs to execute a module as a different user.
- If setfacl and chown both fail and do not let the different user access the module's files, they will be chgrp'd to this group.
- In order for this to work, the remote_user and become_user must share a common group and this setting must be set to that group.
env: [{name: ANSIBLE_COMMON_REMOTE_GROUP}]
vars:
- name: ansible_common_remote_group
ini:
- {key: common_remote_group, section: defaults}
version_added: "2.10"
system_tmpdirs:
description:
- "List of valid system temporary directories on the managed machine for Ansible to validate
O(remote_tmp) against, when specific permissions are needed. These must be world
readable, writable, and executable. This list should only contain directories which the
        system administrator has pre-created with the proper ownership and permissions; otherwise
security issues can arise."
- When O(remote_tmp) is required to be a system temp dir and it does not match any in the list,
the first one from the list will be used instead.
default: [ /var/tmp, /tmp ]
type: list
elements: string
env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
ini:
- section: defaults
key: system_tmpdirs
vars:
- name: ansible_system_tmpdirs
async_dir:
description:
- Directory in which ansible will keep async job information.
default: '~/.ansible_async'
env: [{name: ANSIBLE_ASYNC_DIR}]
ini:
- section: defaults
key: async_dir
vars:
- name: ansible_async_dir
admin_users:
type: list
elements: string
default: ['root', 'toor']
description:
      - List of users expected to have admin privileges. This is used by the controller to
        determine how to share temporary files between the remote user and the become user.
env:
- name: ANSIBLE_ADMIN_USERS
ini:
- section: defaults
key: admin_users
vars:
- name: ansible_admin_users
world_readable_temp:
version_added: '2.10'
default: False
description:
- This makes the temporary files created on the machine world-readable and will issue a warning instead of failing the task.
- It is useful when becoming an unprivileged user.
env:
- name: ANSIBLE_SHELL_ALLOW_WORLD_READABLE_TEMP
vars:
- name: ansible_shell_allow_world_readable_temp
ini:
- {key: allow_world_readable_tmpfiles, section: defaults}
type: boolean
"""
| 3,282 | Python | .py | 86 | 32.476744 | 136 | 0.694018 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,479 | action_common_attributes.py | ansible_ansible/lib/ansible/plugins/doc_fragments/action_common_attributes.py |
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = r"""
attributes:
check_mode:
      description: Can run in check_mode and return changed status prediction without modifying the target; if not supported, the action will be skipped.
diff_mode:
description: Will return details on what has changed (or possibly needs changing in check_mode), when in diff mode
platform:
description: Target OS/families that can be operated against
support: N/A
"""
ACTIONGROUPS = r"""
attributes:
action_group:
description: Action is part of action_group(s), for convenient setting of module_defaults.
support: N/A
membership: []
"""
CONN = r"""
attributes:
become:
description: Is usable alongside become keywords
connection:
description: Uses the target's configured connection information to execute code on it
delegation:
description: Can be used in conjunction with delegate_to and related keywords
"""
FACTS = r"""
attributes:
facts:
description: Action returns an C(ansible_facts) dictionary that will update existing host facts
"""
FILES = r"""
attributes:
safe_file_operations:
description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption
vault:
description: Can automatically decrypt Ansible vaulted files
"""
FLOW = r"""
attributes:
action:
description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller
async:
description: Supports being used with the C(async) keyword
bypass_host_loop:
description:
- Forces a 'global' task that does not execute per host, this bypasses per host templating and serial,
throttle and other loop considerations
- Conditionals will work as if C(run_once) is being used, variables used will be from the first available host
- This action will not work normally outside of lockstep strategies
"""
RAW = r"""
attributes:
raw:
        description: Indicates if an action takes a 'raw' or 'free form' string as an option and has its own special parsing of it
"""
| 2,442 | Python | .py | 62 | 34.387097 | 148 | 0.722597 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,480 | result_format_callback.py | ansible_ansible/lib/ansible/plugins/doc_fragments/result_format_callback.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
result_format:
name: Format of the task result
description:
- Define the task result format used in the callback output.
      - These formats do not cause the callback to emit valid JSON or YAML.
- The output contains these formats interspersed with other non-machine parsable data.
type: str
default: json
env:
- name: ANSIBLE_CALLBACK_RESULT_FORMAT
ini:
- key: callback_result_format
section: defaults
choices:
- json
- yaml
version_added: '2.13'
pretty_results:
name: Configure output for readability
description:
- Configure the result format to be more readable.
- When O(result_format) is set to V(yaml) this option defaults to V(true), and defaults
to V(false) when configured to V(json).
- Setting this option to V(true) will force V(json) and V(yaml) results to always be pretty
printed regardless of verbosity.
- When set to V(true) and used with the V(yaml) result format, this option will
modify module responses in an attempt to produce a more human friendly output at the expense
of correctness, and should not be relied upon to aid in writing variable manipulations
or conditionals. For correctness, set this option to V(false) or set O(result_format) to V(json).
type: bool
default: null
env:
- name: ANSIBLE_CALLBACK_FORMAT_PRETTY
ini:
- key: callback_format_pretty
section: defaults
version_added: '2.13'
"""
| 1,946 | Python | .py | 45 | 34.2 | 109 | 0.639431 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,481 | __init__.py | ansible_ansible/lib/ansible/plugins/netconf/__init__.py |
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from abc import abstractmethod
from functools import wraps
from ansible.errors import AnsibleError
from ansible.plugins import AnsiblePlugin
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.basic import missing_required_lib
try:
from ncclient.operations import RPCError
from ncclient.xml_ import to_xml, to_ele, NCElement
HAS_NCCLIENT = True
NCCLIENT_IMP_ERR = None
# paramiko and gssapi are incompatible and raise AttributeError not ImportError
# When running in FIPS mode, cryptography raises InternalError
# https://bugzilla.redhat.com/show_bug.cgi?id=1778939
except Exception as err:
HAS_NCCLIENT = False
NCCLIENT_IMP_ERR = err
try:
from lxml.etree import Element, SubElement, tostring, fromstring
except ImportError:
from xml.etree.ElementTree import Element, SubElement, tostring, fromstring
def ensure_ncclient(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not HAS_NCCLIENT:
raise AnsibleError("%s: %s" % (missing_required_lib('ncclient'), to_native(NCCLIENT_IMP_ERR)))
return func(self, *args, **kwargs)
return wrapped
class NetconfBase(AnsiblePlugin):
"""
A base class for implementing Netconf connections
.. note:: Unlike most of Ansible, nearly all strings in
:class:`TerminalBase` plugins are byte strings. This is because of
how close to the underlying platform these plugins operate. Remember
to mark literal strings as byte string (``b"string"``) and to use
:func:`~ansible.module_utils.common.text.converters.to_bytes` and
:func:`~ansible.module_utils.common.text.converters.to_text` to avoid unexpected
problems.
List of supported rpc's:
:get: Retrieves running configuration and device state information
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
:validate: Validate the contents of the specified configuration.
:lock: Allows the client to lock the configuration system of a device.
:unlock: Release a configuration lock, previously obtained with the lock operation.
:copy_config: create or replace an entire configuration datastore with the contents of another complete
configuration datastore.
:get-schema: Retrieves the required schema from the device
:get_capabilities: Retrieves device information and supported rpc methods
For JUNOS:
        :execute_rpc: RPC to be executed on the remote device
:load_configuration: Loads given configuration on device
Note: rpc support depends on the capabilities of remote device.
:returns: Returns output received from remote device as byte string
    Note: the 'result' or 'error' from the response should be converted to an
    ElementTree object using 'fromstring' to parse the output as an xml doc
'get_capabilities()' returns 'result' as a json string.
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
data = conn.execute_rpc(rpc)
        reply = fromstring(data)
data = conn.get_capabilities()
json.loads(data)
        conn.load_configuration(config=['set system ntp server 1.1.1.1'], action='set', format='text')
"""
__rpc__ = ['rpc', 'get_config', 'get', 'edit_config', 'validate', 'copy_config', 'dispatch', 'lock', 'unlock',
'discard_changes', 'commit', 'get_schema', 'delete_config', 'get_device_operations']
def __init__(self, connection):
super(NetconfBase, self).__init__()
self._connection = connection
@property
def m(self):
return self._connection.manager
def rpc(self, name):
"""
        RPC to be executed on the remote device
:param name: Name of rpc in string format
:return: Received rpc response from remote host
"""
try:
obj = to_ele(name)
resp = self.m.rpc(obj)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
except RPCError as exc:
msg = exc.xml
raise Exception(to_xml(msg))
def get_config(self, source=None, filter=None):
"""
Retrieve all or part of a specified configuration
(by default entire configuration is retrieved).
:param source: Name of the configuration datastore being queried, defaults to running datastore
:param filter: This argument specifies the portion of the configuration data to retrieve
:return: Returns xml string containing the RPC response received from remote host
"""
if isinstance(filter, list):
filter = tuple(filter)
if not source:
source = 'running'
resp = self.m.get_config(source=source, filter=filter)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def get(self, filter=None, with_defaults=None):
"""
Retrieve device configuration and state information.
:param filter: This argument specifies the portion of the state data to retrieve
(by default entire state data is retrieved)
:param with_defaults: defines an explicit method of retrieving default values
from the configuration
:return: Returns xml string containing the RPC response received from remote host
"""
if isinstance(filter, list):
filter = tuple(filter)
resp = self.m.get(filter=filter, with_defaults=with_defaults)
response = resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
return response
def edit_config(self, config=None, format='xml', target='candidate', default_operation=None, test_option=None, error_option=None):
"""
Loads all or part of the specified *config* to the *target* configuration datastore.
:param config: Is the configuration, which must be rooted in the `config` element.
It can be specified either as a string or an :class:`~xml.etree.ElementTree.Element`.
:param format: The format of configuration eg. xml, text
:param target: Is the name of the configuration datastore being edited
:param default_operation: If specified must be one of { `"merge"`, `"replace"`, or `"none"` }
:param test_option: If specified must be one of { `"test_then_set"`, `"set"` }
:param error_option: If specified must be one of { `"stop-on-error"`, `"continue-on-error"`, `"rollback-on-error"` }
The `"rollback-on-error"` *error_option* depends on the `:rollback-on-error` capability.
:return: Returns xml string containing the RPC response received from remote host
"""
if config is None:
raise ValueError('config value must be provided')
resp = self.m.edit_config(config, format=format, target=target, default_operation=default_operation, test_option=test_option,
error_option=error_option)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def validate(self, source='candidate'):
"""
Validate the contents of the specified configuration.
:param source: Is the name of the configuration datastore being validated or `config` element
containing the configuration subtree to be validated
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.validate(source=source)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def copy_config(self, source, target):
"""
Create or replace an entire configuration datastore with the contents of another complete configuration datastore.
:param source: Is the name of the configuration datastore to use as the source of the copy operation or `config`
element containing the configuration subtree to copy
:param target: Is the name of the configuration datastore to use as the destination of the copy operation
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.copy_config(source, target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def dispatch(self, rpc_command=None, source=None, filter=None):
"""
Execute rpc on the remote device eg. dispatch('clear-arp-table')
:param rpc_command: specifies rpc command to be dispatched either in plain text or in xml element format (depending on command)
:param source: name of the configuration datastore being queried
:param filter: specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
:return: Returns xml string containing the RPC response received from remote host
"""
if rpc_command is None:
raise ValueError('rpc_command value must be provided')
resp = self.m.dispatch(fromstring(rpc_command), source=source, filter=filter)
if isinstance(resp, NCElement):
# In case xml reply is transformed or namespace is removed in
# ncclient device specific handler return modified xml response
result = resp.data_xml
elif hasattr(resp, 'data_ele') and resp.data_ele:
# if data node is present in xml response return the xml string
# with data node as root
result = resp.data_xml
else:
# return raw xml string received from host with rpc-reply as the root node
result = resp.xml
return result
def lock(self, target="candidate"):
"""
Allows the client to lock the configuration system of a device.
:param target: is the name of the configuration datastore to lock,
defaults to candidate datastore
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.lock(target=target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def unlock(self, target="candidate"):
"""
Release a configuration lock, previously obtained with the lock operation.
:param target: is the name of the configuration datastore to unlock,
defaults to candidate datastore
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.unlock(target=target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def discard_changes(self):
"""
Revert the candidate configuration to the currently running configuration.
Any uncommitted changes are discarded.
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.discard_changes()
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def commit(self, confirmed=False, timeout=None, persist=None):
"""
Commit the candidate configuration as the device's new current configuration.
Depends on the `:candidate` capability.
A confirmed commit (i.e. if *confirmed* is `True`) is reverted if there is no
followup commit within the *timeout* interval. If no timeout is specified the
confirm timeout defaults to 600 seconds (10 minutes).
A confirming commit may have the *confirmed* parameter but this is not required.
Depends on the `:confirmed-commit` capability.
:param confirmed: whether this is a confirmed commit
:param timeout: specifies the confirm timeout in seconds
:param persist: make the confirmed commit survive a session termination,
and set a token on the ongoing confirmed commit
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.commit(confirmed=confirmed, timeout=timeout, persist=persist)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def get_schema(self, identifier=None, version=None, format=None):
"""
Retrieve a named schema, with optional revision and type.
:param identifier: name of the schema to be retrieved
:param version: version of schema to get
:param format: format of the schema to be retrieved, yang is the default
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.get_schema(identifier, version=version, format=format)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def delete_config(self, target):
"""
delete a configuration datastore
:param target: specifies the name or URL of configuration datastore to delete
:return: Returns xml string containing the RPC response received from remote host
"""
resp = self.m.delete_config(target)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
def locked(self, target):
return self.m.locked(target)
@abstractmethod
def get_capabilities(self):
"""
Retrieves device information and supported
rpc methods by device platform and return result
as a string
:return: Netconf session capability
"""
pass
@staticmethod
def guess_network_os(obj):
"""
        Identifies the operating system of the network device.
        :param obj: ncclient manager connection instance
        :return: The name of the network operating system.
"""
pass
def get_base_rpc(self):
"""
Returns list of base rpc method supported by remote device
:return: List of RPC supported
"""
return self.__rpc__
def put_file(self, source, destination):
"""
Copies file to remote host
:param source: Source location of file
:param destination: Destination file path
:return: Returns xml string containing the RPC response received from remote host
"""
pass
def fetch_file(self, source, destination):
"""
Fetch file from remote host
:param source: Source location of file
        :param destination: Destination location of file
:return: Returns xml string containing the RPC response received from remote host
"""
pass
def get_device_operations(self, server_capabilities):
"""
Retrieve remote host capability from Netconf server hello message.
:param server_capabilities: Server capabilities received during Netconf session initialization
:return: Remote host capabilities in dictionary format
"""
operations = {}
capabilities = '\n'.join(server_capabilities)
operations['supports_commit'] = ':candidate' in capabilities
operations['supports_defaults'] = ':with-defaults' in capabilities
operations['supports_confirm_commit'] = ':confirmed-commit' in capabilities
operations['supports_startup'] = ':startup' in capabilities
operations['supports_xpath'] = ':xpath' in capabilities
operations['supports_writable_running'] = ':writable-running' in capabilities
operations['supports_validate'] = ':validate' in capabilities
operations['lock_datastore'] = []
if operations['supports_writable_running']:
operations['lock_datastore'].append('running')
if operations['supports_commit']:
operations['lock_datastore'].append('candidate')
if operations['supports_startup']:
operations['lock_datastore'].append('startup')
operations['supports_lock'] = bool(operations['lock_datastore'])
return operations
# TODO Restore .xml, when ncclient supports it for all platforms
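As a hedged illustration of how get_device_operations() derives feature flags from the hello message, a standalone sketch; the capability URIs are representative RFC 6241 values, not taken from a real device:

# Sketch only: mirrors the substring checks in get_device_operations().
server_capabilities = [
    "urn:ietf:params:netconf:capability:candidate:1.0",
    "urn:ietf:params:netconf:capability:validate:1.1",
    "urn:ietf:params:netconf:capability:writable-running:1.0",
]
capabilities = "\n".join(server_capabilities)
assert ":candidate" in capabilities             # supports_commit; lock 'candidate'
assert ":writable-running" in capabilities      # lock 'running'
assert ":confirmed-commit" not in capabilities  # no confirmed-commit support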
| 17,094 | Python | .py | 323 | 43.934985 | 135 | 0.676555 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,482 | __init__.py | ansible_ansible/lib/ansible/_vendor/__init__.py |
# (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import pkgutil
import sys
import warnings
# This package exists to host vendored top-level Python packages for downstream packaging. Any Python packages
# installed beneath this one will be masked from the Ansible loader, and available from the front of sys.path.
# It is expected that the vendored packages will be loaded very early, so a warning will be fired on import of
# the top-level ansible package if any packages beneath this are already loaded at that point.
#
# Python packages may be installed here during downstream packaging using something like:
# pip install --upgrade -t (path to this dir) cryptography pyyaml packaging jinja2
# mask vendored content below this package from being accessed as an ansible subpackage
__path__ = []
def _ensure_vendored_path_entry():
"""
Ensure that any downstream-bundled content beneath this package is available at the top of sys.path
"""
# patch our vendored dir onto sys.path
vendored_path_entry = os.path.dirname(__file__)
vendored_module_names = set(m[1] for m in pkgutil.iter_modules([vendored_path_entry], '')) # m[1] == m.name
if vendored_module_names:
# patch us early to load vendored deps transparently
if vendored_path_entry in sys.path:
# handle reload case by removing the existing entry, wherever it might be
sys.path.remove(vendored_path_entry)
sys.path.insert(0, vendored_path_entry)
already_loaded_vendored_modules = set(sys.modules.keys()).intersection(vendored_module_names)
if already_loaded_vendored_modules:
warnings.warn('One or more Python packages bundled by this ansible-core distribution were already '
'loaded ({0}). This may result in undefined behavior.'.format(', '.join(sorted(already_loaded_vendored_modules))))
_ensure_vendored_path_entry()
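A hedged sketch of the observable effect, assuming a downstream packager actually installed something into the vendor directory (a default install ships it empty):

# Sketch only: importing the package runs _ensure_vendored_path_entry().
import sys
import ansible._vendor  # noqa: F401

# If the vendor directory contains packages, it now sits at sys.path[0] so the
# vendored copies shadow system-wide installs; otherwise sys.path is unchanged.
print(sys.path[0])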
| 2,033 | Python | .py | 34 | 54.647059 | 140 | 0.738431 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,483 | release.py | ansible_ansible/packaging/release.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Manage upstream ansible-core releases."""
from __future__ import annotations
import argparse
import contextlib
import dataclasses
import datetime
import enum
import functools
import gzip
import hashlib
import http.client
import inspect
import json
import math
import os
import pathlib
import re
import secrets
import shlex
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import typing as t
import urllib.error
import urllib.parse
import urllib.request
import venv
import webbrowser
import zipfile
import jinja2
from packaging.version import Version, InvalidVersion
# region CLI Framework
C = t.TypeVar("C", bound=t.Callable[..., None])
def path_to_str(value: t.Any) -> str:
"""Return the given value converted to a string suitable for use as a command line argument."""
return f"{value}/" if isinstance(value, pathlib.Path) and value.is_dir() else str(value)
@t.overload
def run(*args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str, capture_output: t.Literal[True]) -> CompletedProcess:
...
@t.overload
def run(*args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str, capture_output: t.Literal[False]) -> None:
...
@t.overload
def run(*args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str) -> None:
...
def run(
*args: t.Any,
env: dict[str, t.Any] | None,
cwd: pathlib.Path | str,
capture_output: bool = False,
) -> CompletedProcess | None:
"""Run the specified command."""
args = [arg.relative_to(cwd) if isinstance(arg, pathlib.Path) else arg for arg in args]
str_args = tuple(path_to_str(arg) for arg in args)
str_env = {key: path_to_str(value) for key, value in env.items()} if env is not None else None
display.show(f"--> {shlex.join(str_args)}", color=Display.CYAN)
try:
p = subprocess.run(str_args, check=True, text=True, env=str_env, cwd=cwd, capture_output=capture_output)
except subprocess.CalledProcessError as ex:
# improve type hinting and include stdout/stderr (if any) in the message
raise CalledProcessError(
message=str(ex),
cmd=str_args,
status=ex.returncode,
stdout=ex.stdout,
stderr=ex.stderr,
) from None
if not capture_output:
return None
# improve type hinting
return CompletedProcess(
stdout=p.stdout,
stderr=p.stderr,
)
@contextlib.contextmanager
def suppress_when(error_as_warning: bool) -> t.Generator[None, None, None]:
"""Conditionally convert an ApplicationError in the provided context to a warning."""
if error_as_warning:
try:
yield
except ApplicationError as ex:
display.warning(ex)
else:
yield
class ApplicationError(Exception):
"""A fatal application error which will be shown without a traceback."""
class CalledProcessError(Exception):
"""Results from a failed process."""
def __init__(self, message: str, cmd: tuple[str, ...], status: int, stdout: str | None, stderr: str | None) -> None:
if stdout and (stdout := stdout.strip()):
message += f"\n>>> Standard Output\n{stdout}"
if stderr and (stderr := stderr.strip()):
message += f"\n>>> Standard Error\n{stderr}"
super().__init__(message)
self.cmd = cmd
self.status = status
self.stdout = stdout
self.stderr = stderr
@dataclasses.dataclass(frozen=True)
class CompletedProcess:
"""Results from a completed process."""
stdout: str
stderr: str
class Display:
"""Display interface for sending output to the console."""
CLEAR = "\033[0m"
RED = "\033[31m"
BLUE = "\033[34m"
PURPLE = "\033[35m"
CYAN = "\033[36m"
def fatal(self, message: t.Any) -> None:
"""Print a fatal message to the console."""
self.show(f"FATAL: {message}", color=self.RED)
def warning(self, message: t.Any) -> None:
"""Print a warning message to the console."""
self.show(f"WARNING: {message}", color=self.PURPLE)
def show(self, message: t.Any, color: str | None = None) -> None:
"""Print a message to the console."""
print(f"{color or self.CLEAR}{message}{self.CLEAR}", flush=True)
class CommandFramework:
"""
Simple command line framework inspired by nox.
Argument parsing is handled by argparse. Each function annotated with an instance of this class becomes a subcommand.
Options are shared across all commands, and are defined by providing kwargs when creating an instance of this class.
Options are only defined for commands which have a matching parameter.
The name of each kwarg is the option name, which will be prefixed with `--` and with underscores converted to dashes.
The value of each kwarg is passed as kwargs to ArgumentParser.add_argument. Passing None results in an internal only parameter.
The following custom kwargs are recognized and are not passed to add_argument:
name - Override the positional argument (option) passed to add_argument.
exclusive - Put the argument in an exclusive group of the given name.
"""
def __init__(self, **kwargs: dict[str, t.Any] | None) -> None:
self.commands: list[t.Callable[..., None]] = []
self.arguments = kwargs
self.parsed_arguments: argparse.Namespace | None = None
def __call__(self, func: C) -> C:
"""Register the decorated function as a CLI command."""
self.commands.append(func)
return func
def run(self, *args: t.Callable[..., None], **kwargs) -> None:
"""Run the specified command(s), using any provided internal args."""
for arg in args:
self._run(arg, **kwargs)
def main(self) -> None:
"""Main program entry point."""
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(metavar="COMMAND", required=True)
for func in self.commands:
func_parser = subparsers.add_parser(self._format_command_name(func), description=func.__doc__, help=func.__doc__)
func_parser.set_defaults(func=func)
exclusive_groups = {}
signature = inspect.signature(func)
for name in signature.parameters:
if name not in self.arguments:
raise RuntimeError(f"The '{name}' argument, used by '{func.__name__}', has not been defined.")
if (arguments := self.arguments.get(name)) is None:
continue # internal use
arguments = arguments.copy()
exclusive = arguments.pop("exclusive", None)
# noinspection PyProtectedMember, PyUnresolvedReferences
command_parser: argparse._ActionsContainer
if exclusive:
if exclusive not in exclusive_groups:
exclusive_groups[exclusive] = func_parser.add_mutually_exclusive_group()
command_parser = exclusive_groups[exclusive]
else:
command_parser = func_parser
if option_name := arguments.pop("name", None):
arguments.update(dest=name)
else:
option_name = f"--{name.replace('_', '-')}"
command_parser.add_argument(option_name, **arguments)
try:
# noinspection PyUnresolvedReferences
import argcomplete
except ImportError:
pass
else:
argcomplete.autocomplete(parser)
self.parsed_arguments = parser.parse_args()
try:
self.run(self.parsed_arguments.func)
except ApplicationError as ex:
display.fatal(ex)
sys.exit(1)
def _run(self, func: t.Callable[..., None], **kwargs) -> None:
"""Run the specified command, using any provided internal args."""
signature = inspect.signature(func)
func_args = {name: getattr(self.parsed_arguments, name) for name in signature.parameters if hasattr(self.parsed_arguments, name)}
func_args.update({name: value for name, value in kwargs.items() if name in signature.parameters})
printable_args = ", ".join(f"{name}={repr(value)}" for name, value in func_args.items())
label = f"{self._format_command_name(func)}({printable_args})"
display.show(f"==> {label}", color=Display.BLUE)
try:
func(**func_args)
except BaseException:
display.show(f"!!! {label}", color=Display.RED)
raise
display.show(f"<== {label}", color=Display.BLUE)
@staticmethod
def _format_command_name(func: t.Callable[..., None]) -> str:
"""Return the friendly name of the given command."""
return func.__name__.replace("_", "-")
display = Display()
# endregion
# region Data Classes
@dataclasses.dataclass(frozen=True)
class GitHubRelease:
"""Details required to create a GitHub release."""
user: str
repo: str
tag: str
target: str
title: str
body: str
pre_release: bool
@dataclasses.dataclass(frozen=True)
class PullRequest:
"""Details required to create a pull request."""
upstream_user: str
upstream_repo: str
upstream_branch: str
user: str
repo: str
branch: str
title: str
body: str
@dataclasses.dataclass(frozen=True)
class Remote:
"""Details about a git remote."""
name: str
user: str
repo: str
@dataclasses.dataclass(frozen=True)
class Remotes:
"""Details about git removes."""
fork: Remote
upstream: Remote
@dataclasses.dataclass(frozen=True)
class GitState:
"""Details about the state of the git repository."""
remotes: Remotes
branch: str | None
commit: str
@dataclasses.dataclass(frozen=True)
class ReleaseArtifact:
"""Information about a release artifact on PyPI."""
package_type: str
package_label: str
url: str
size: int
digest: str
digest_algorithm: str
@dataclasses.dataclass(frozen=True)
class ReleaseAnnouncement:
"""Contents of a release announcement."""
subject: str
body: str
# endregion
# region Utilities
SCRIPT_DIR = pathlib.Path(__file__).parent.resolve()
CHECKOUT_DIR = SCRIPT_DIR.parent
ANSIBLE_LIB_DIR = CHECKOUT_DIR / "lib"
ANSIBLE_DIR = ANSIBLE_LIB_DIR / "ansible"
ANSIBLE_BIN_DIR = CHECKOUT_DIR / "bin"
ANSIBLE_RELEASE_FILE = ANSIBLE_DIR / "release.py"
ANSIBLE_REQUIREMENTS_FILE = CHECKOUT_DIR / "requirements.txt"
ANSIBLE_PYPROJECT_TOML_FILE = CHECKOUT_DIR / "pyproject.toml"
DIST_DIR = CHECKOUT_DIR / "dist"
VENV_DIR = DIST_DIR / ".venv" / "release"
CHANGELOGS_DIR = CHECKOUT_DIR / "changelogs"
CHANGELOGS_FRAGMENTS_DIR = CHANGELOGS_DIR / "fragments"
ANSIBLE_VERSION_PATTERN = re.compile("^__version__ = '(?P<version>.*)'$", re.MULTILINE)
ANSIBLE_VERSION_FORMAT = "__version__ = '{version}'"
DIGEST_ALGORITHM = "sha256"
# These endpoint names match those defined as defaults in twine.
# See: https://github.com/pypa/twine/blob/9c2c0a1c535155931c3d879359330cb836950c6a/twine/utils.py#L82-L85
PYPI_ENDPOINTS = dict(
pypi="https://pypi.org/pypi",
testpypi="https://test.pypi.org/pypi",
)
PIP_ENV = dict(
PIP_REQUIRE_VIRTUALENV="yes",
PIP_DISABLE_PIP_VERSION_CHECK="yes",
)
class VersionMode(enum.Enum):
"""How to handle the ansible-core version."""
DEFAULT = enum.auto()
"""Do not allow development versions. Do not allow post release versions."""
STRIP_POST = enum.auto()
"""Do not allow development versions. Strip the post release from the version if present."""
REQUIRE_POST = enum.auto()
"""Do not allow development versions. Require a post release version."""
REQUIRE_DEV_POST = enum.auto()
"""Require a development or post release version."""
ALLOW_DEV_POST = enum.auto()
"""Allow development and post release versions."""
def apply(self, version: Version) -> Version:
"""Apply the mode to the given version and return the result."""
original_version = version
release_component_count = 3
if len(version.release) != release_component_count:
raise ApplicationError(f"Version {version} contains {version.release} release components instead of {release_component_count}.")
if version.epoch:
raise ApplicationError(f"Version {version} contains an epoch component: {version.epoch}")
if version.local is not None:
raise ApplicationError(f"Version {version} contains a local component: {version.local}")
if version.is_devrelease and version.is_postrelease:
raise ApplicationError(f"Version {version} is a development and post release version.")
if self == VersionMode.ALLOW_DEV_POST:
return version
if self == VersionMode.REQUIRE_DEV_POST:
if not version.is_devrelease and not version.is_postrelease:
raise ApplicationError(f"Version {version} is not a development or post release version.")
return version
if version.is_devrelease:
raise ApplicationError(f"Version {version} is a development release: {version.dev}")
if self == VersionMode.STRIP_POST:
if version.is_postrelease:
version = Version(str(version).removesuffix(f".post{version.post}"))
display.warning(f"Using version {version} by stripping the post release suffix from version {original_version}.")
return version
if self == VersionMode.REQUIRE_POST:
if not version.is_postrelease:
raise ApplicationError(f"Version {version} is not a post release version.")
return version
if version.is_postrelease:
raise ApplicationError(f"Version {version} is a post release.")
if self == VersionMode.DEFAULT:
return version
raise NotImplementedError(self)
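# Illustrative sketch of VersionMode.apply() behavior (comment only):
#   VersionMode.STRIP_POST.apply(Version("2.17.1.post0")) -> Version("2.17.1")
#   VersionMode.REQUIRE_POST.apply(Version("2.17.1"))     -> ApplicationError
#   VersionMode.DEFAULT.apply(Version("2.17.0.dev1"))     -> ApplicationError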
@t.overload
def git(*args: t.Any, capture_output: t.Literal[True]) -> CompletedProcess:
...
@t.overload
def git(*args: t.Any, capture_output: t.Literal[False]) -> None:
...
@t.overload
def git(*args: t.Any) -> None:
...
def git(*args: t.Any, capture_output: t.Literal[True] | t.Literal[False] = False) -> CompletedProcess | None:
"""Run the specified git command."""
return run("git", *args, env=None, cwd=CHECKOUT_DIR, capture_output=capture_output)
def get_commit(rev: str | None = None) -> str:
"""Return the commit associated with the given rev, or HEAD if no rev is given."""
try:
return git("rev-parse", "--quiet", "--verify", "--end-of-options", f"{rev or 'HEAD'}^{{commit}}", capture_output=True).stdout.strip()
except CalledProcessError as ex:
if ex.status == 1 and not ex.stdout and not ex.stderr:
raise ApplicationError(f"Could not find commit: {rev}") from None
raise
def prepare_pull_request(version: Version, branch: str, title: str, add: t.Iterable[pathlib.Path | str], allow_stale: bool) -> PullRequest:
"""Return pull request parameters using the provided details."""
git_state = get_git_state(version, allow_stale)
if not git("status", "--porcelain", "--untracked-files=no", capture_output=True).stdout.strip():
raise ApplicationError("There are no changes to commit. Did you skip a step?")
upstream_branch = get_upstream_branch(version)
body = create_pull_request_body(title)
git("checkout", "-b", branch)
git("add", *add)
git("commit", "-m", title)
git("push", "--set-upstream", git_state.remotes.fork.name, branch)
git("checkout", git_state.branch or git_state.commit)
git("branch", "-d", branch)
pr = PullRequest(
upstream_user=git_state.remotes.upstream.user,
upstream_repo=git_state.remotes.upstream.repo,
upstream_branch=upstream_branch,
user=git_state.remotes.fork.user,
repo=git_state.remotes.fork.repo,
branch=branch,
title=title,
body=body,
)
return pr
def create_github_release(release: GitHubRelease) -> None:
"""Open a browser tab for creating the given GitHub release."""
# See: https://docs.github.com/en/repositories/releasing-projects-on-github/automation-for-release-forms-with-query-parameters
params = dict(
tag=release.tag,
target=release.target,
title=release.title,
body=release.body,
prerelease=1 if release.pre_release else 0,
)
query_string = urllib.parse.urlencode(params)
url = f"https://github.com/{release.user}/{release.repo}/releases/new?{query_string}"
display.show("Opening release creation page in new tab using default browser ...")
webbrowser.open_new_tab(url)
def create_pull_request(pr: PullRequest) -> None:
"""Open a browser tab for creating the given pull request."""
# See: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/using-query-parameters-to-create-a-pull-request # noqa
params = dict(
quick_pull=1,
title=pr.title,
body=pr.body,
)
query_string = urllib.parse.urlencode(params)
url = f"https://github.com/{pr.upstream_user}/{pr.upstream_repo}/compare/{pr.upstream_branch}...{pr.user}:{pr.repo}:{pr.branch}?{query_string}"
display.show("Opening pull request in new tab using default browser ...")
webbrowser.open_new_tab(url)
def create_pull_request_body(title: str) -> str:
"""Return a simple pull request body created from the given title."""
body = f"""
##### SUMMARY
{title}
##### ISSUE TYPE
Feature Pull Request
"""
return body.lstrip()
def get_remote(name: str, push: bool) -> Remote:
"""Return details about the specified remote."""
remote_url = git("remote", "get-url", *(["--push"] if push else []), name, capture_output=True).stdout.strip()
remote_match = re.search(r"[@/]github[.]com[:/](?P<user>[^/]+)/(?P<repo>[^.]+)(?:[.]git)?$", remote_url)
if not remote_match:
raise RuntimeError(f"Unable to identify the user and repo in the '{name}' remote: {remote_url}")
remote = Remote(
name=name,
user=remote_match.group("user"),
repo=remote_match.group("repo"),
)
return remote
@functools.cache
def get_remotes() -> Remotes:
"""Return details about the remotes we need to use."""
# assume the devel branch has its upstream remote pointing to the user's fork
fork_remote_name = git("branch", "--list", "devel", "--format=%(upstream:remotename)", capture_output=True).stdout.strip()
if not fork_remote_name:
raise ApplicationError("Could not determine the remote for your fork of Ansible.")
display.show(f"Detected '{fork_remote_name}' as the remote for your fork of Ansible.")
# assume there is only one ansible org remote, which would allow release testing using another repo in the same org without special configuration
all_remotes = git("remote", "-v", capture_output=True).stdout.strip().splitlines()
ansible_remote_names = set(line.split()[0] for line in all_remotes if re.search(r"[@/]github[.]com[:/]ansible/", line))
if not ansible_remote_names:
raise ApplicationError(f"Could not determine the remote which '{fork_remote_name}' was forked from.")
if len(ansible_remote_names) > 1:
raise ApplicationError(f"Found multiple candidates for the remote from which '{fork_remote_name}' was forked from: {', '.join(ansible_remote_names)}")
upstream_remote_name = ansible_remote_names.pop()
display.show(f"Detected '{upstream_remote_name}' as the remote from which '{fork_remote_name}' was forked from.")
if fork_remote_name == upstream_remote_name:
raise ApplicationError("The remote for your fork of Ansible cannot be the same as the remote from which it was forked.")
remotes = Remotes(
fork=get_remote(fork_remote_name, push=True),
upstream=get_remote(upstream_remote_name, push=False),
)
return remotes
def get_upstream_branch(version: Version) -> str:
"""Return the upstream branch name for the given version."""
return f"stable-{version.major}.{version.minor}"
def get_git_state(version: Version, allow_stale: bool) -> GitState:
"""Return information about the current state of the git repository."""
remotes = get_remotes()
upstream_branch = get_upstream_branch(version)
git("fetch", remotes.upstream.name, upstream_branch)
upstream_ref = f"{remotes.upstream.name}/{upstream_branch}"
upstream_commit = get_commit(upstream_ref)
commit = get_commit()
if commit != upstream_commit:
with suppress_when(allow_stale):
raise ApplicationError(f"The current commit ({commit}) does not match {upstream_ref} ({upstream_commit}).")
branch = git("branch", "--show-current", capture_output=True).stdout.strip() or None
state = GitState(
remotes=remotes,
branch=branch,
commit=commit,
)
return state
@functools.cache
def ensure_venv() -> dict[str, t.Any]:
"""Ensure the release venv is ready and return the env vars needed to use it."""
# TODO: consider freezing the ansible and release requirements along with their dependencies
ansible_requirements = ANSIBLE_REQUIREMENTS_FILE.read_text()
release_requirements = """
build
twine
"""
requirements_file = CHECKOUT_DIR / "test/lib/ansible_test/_data/requirements/sanity.changelog.txt"
requirements_content = requirements_file.read_text()
requirements_content += ansible_requirements
requirements_content += release_requirements
requirements_hash = hashlib.sha256(requirements_content.encode()).hexdigest()[:8]
python_version = ".".join(map(str, sys.version_info[:2]))
venv_dir = VENV_DIR / python_version / requirements_hash
venv_bin_dir = venv_dir / "bin"
venv_requirements_file = venv_dir / "requirements.txt"
venv_marker_file = venv_dir / "marker.txt"
env = os.environ.copy()
env.pop("PYTHONPATH", None) # avoid interference from ansible being injected into the environment
env.update(
PATH=os.pathsep.join((str(venv_bin_dir), env["PATH"])),
)
if not venv_marker_file.exists():
display.show(f"Creating a Python {python_version} virtual environment ({requirements_hash}) ...")
if venv_dir.exists():
shutil.rmtree(venv_dir)
venv.create(venv_dir, with_pip=True)
venv_requirements_file.write_text(requirements_content)
run("pip", "install", "-r", venv_requirements_file, env=env | PIP_ENV, cwd=CHECKOUT_DIR)
venv_marker_file.touch()
return env
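# Sketch of the venv cache layout produced above (illustrative paths; VENV_DIR is
# defined earlier in this script): one venv per Python version and requirements hash,
# so changing Python or any requirement input builds a fresh environment.
#
#   {VENV_DIR}/3.12/1a2b3c4d/bin/               # interpreter plus installed tools
#   {VENV_DIR}/3.12/1a2b3c4d/requirements.txt   # inputs used to build this venv
#   {VENV_DIR}/3.12/1a2b3c4d/marker.txt         # written last; marks the venv complete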
def get_pypi_project(repository: str, project: str, version: Version | None = None) -> dict[str, t.Any]:
"""Return the project JSON from PyPI for the specified repository, project and version (optional)."""
endpoint = PYPI_ENDPOINTS[repository]
if version:
url = f"{endpoint}/{project}/{version}/json"
else:
url = f"{endpoint}/{project}/json"
opener = urllib.request.build_opener()
response: http.client.HTTPResponse
try:
with opener.open(url) as response:
data = json.load(response)
except urllib.error.HTTPError as ex:
if version:
target = f'{project!r} version {version}'
else:
target = f'{project!r}'
if ex.status == http.HTTPStatus.NOT_FOUND:
raise ApplicationError(f"Could not find {target} on PyPI.") from None
raise RuntimeError(f"Failed to get {target} from PyPI.") from ex
return data
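# For reference, the JSON API URLs built above take one of two shapes (the base URL
# comes from PYPI_ENDPOINTS, defined earlier in this script):
#
#   https://pypi.org/pypi/ansible-core/json          # latest metadata for a project
#   https://pypi.org/pypi/ansible-core/2.17.0/json   # metadata for a single version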
def get_ansible_version(version: str | None = None, /, commit: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version:
"""Parse and return the current ansible-core version, the provided version or the version from the provided commit."""
if version and commit:
raise ValueError("Specify only one of: version, commit")
if version:
source = ""
else:
if commit:
current = git("show", f"{commit}:{ANSIBLE_RELEASE_FILE.relative_to(CHECKOUT_DIR)}", capture_output=True).stdout
else:
current = ANSIBLE_RELEASE_FILE.read_text()
if not (match := ANSIBLE_VERSION_PATTERN.search(current)):
raise RuntimeError("Failed to get the ansible-core version.")
version = match.group("version")
source = f" in '{ANSIBLE_RELEASE_FILE}'"
try:
parsed_version = Version(version)
except InvalidVersion:
raise ApplicationError(f"Invalid version{source}: {version}") from None
parsed_version = mode.apply(parsed_version)
return parsed_version
def get_next_version(version: Version, /, final: bool = False, pre: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version:
"""Return the next version after the specified version."""
# TODO: consider using development versions instead of post versions after a release is published
pre = pre or ""
micro = version.micro
if version.is_devrelease:
# The next version of a development release is the same version without the development component.
if final:
pre = ""
elif not pre and version.pre is not None:
pre = f"{version.pre[0]}{version.pre[1]}"
elif not pre:
pre = "b1" # when there is no existing pre and none specified, advance to b1
elif version.is_postrelease:
# The next version of a post release is the next pre-release *or* micro release component.
if final:
pre = ""
elif not pre and version.pre is not None:
pre = f"{version.pre[0]}{version.pre[1] + 1}"
elif not pre:
pre = "rc1" # when there is no existing pre and none specified, advance to rc1
if version.pre is None:
micro = version.micro + 1
else:
raise ApplicationError(f"Version {version} is not a development or post release version.")
version = f"{version.major}.{version.minor}.{micro}{pre}"
return get_ansible_version(version, mode=mode)
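# Worked examples of the bumping rules above (illustrative version strings):
#
#   2.19.0.dev0          -> 2.19.0b1    # dev release, no existing pre: advance to b1
#   2.19.0b1.post0       -> 2.19.0b2    # post of a pre-release: bump the pre number
#   2.18.1.post0         -> 2.18.2rc1   # post of a final release: next micro, rc1
#   2.18.1.post0 (final) -> 2.18.2      # final=True drops the pre-release component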
def check_ansible_version(current_version: Version, requested_version: Version) -> None:
"""Verify the requested version is valid for the current version."""
if requested_version.release[:2] != current_version.release[:2]:
raise ApplicationError(f"Version {requested_version} does not match the major and minor portion of the current version: {current_version}")
if requested_version < current_version:
raise ApplicationError(f"Version {requested_version} is older than the current version: {current_version}")
# TODO: consider additional checks to avoid mistakes when incrementing the release version
def set_ansible_version(current_version: Version, requested_version: Version) -> None:
"""Set the current ansible-core version."""
check_ansible_version(current_version, requested_version)
if requested_version == current_version:
return
display.show(f"Updating version {current_version} to {requested_version} ...")
current = ANSIBLE_RELEASE_FILE.read_text()
updated = ANSIBLE_VERSION_PATTERN.sub(ANSIBLE_VERSION_FORMAT.format(version=requested_version), current)
if current == updated:
raise RuntimeError("Failed to set the ansible-core version.")
ANSIBLE_RELEASE_FILE.write_text(updated)
def get_latest_setuptools_version() -> Version:
"""Return the latest setuptools version found on PyPI."""
data = get_pypi_project('pypi', 'setuptools')
version = Version(data['info']['version'])
return version
def set_setuptools_upper_bound(requested_version: Version) -> None:
"""Set the upper bound on setuptools in pyproject.toml."""
current = ANSIBLE_PYPROJECT_TOML_FILE.read_text()
pattern = re.compile(r'^(?P<begin>requires = \["setuptools >= )(?P<lower>[^,]+)(?P<middle>, <= )(?P<upper>[^"]+)(?P<end>".*)$', re.MULTILINE)
match = pattern.search(current)
if not match:
raise ApplicationError(f"Unable to find the 'requires' entry in: {ANSIBLE_PYPROJECT_TOML_FILE.relative_to(CHECKOUT_DIR)}")
current_version = Version(match.group('upper'))
if requested_version == current_version:
return
display.show(f"Updating setuptools upper bound from {current_version} to {requested_version} ...")
updated = pattern.sub(fr'\g<begin>\g<lower>\g<middle>{requested_version}\g<end>', current)
if current == updated:
raise RuntimeError("Failed to set the setuptools upper bound.")
ANSIBLE_PYPROJECT_TOML_FILE.write_text(updated)
def create_reproducible_sdist(original_path: pathlib.Path, output_path: pathlib.Path, mtime: int) -> None:
"""Read the specified sdist and write out a new copy with uniform file metadata at the specified location."""
with tarfile.open(original_path) as original_archive:
with tempfile.TemporaryDirectory() as temp_dir:
tar_file = pathlib.Path(temp_dir) / "sdist.tar"
with tarfile.open(tar_file, mode="w") as tar_archive:
for original_info in original_archive.getmembers(): # type: tarfile.TarInfo
tar_archive.addfile(create_reproducible_tar_info(original_info, mtime), original_archive.extractfile(original_info))
with tar_file.open("rb") as tar_archive:
with gzip.GzipFile(output_path, "wb", mtime=mtime) as output_archive:
shutil.copyfileobj(tar_archive, output_archive)
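# Note on the round trip above: every tar member is rewritten with uniform metadata,
# and the gzip wrapper is given a fixed mtime as well, because GzipFile embeds a
# timestamp in its header that would otherwise differ from build to build.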
def create_reproducible_tar_info(original: tarfile.TarInfo, mtime: int) -> tarfile.TarInfo:
"""Return a copy of the given TarInfo with uniform file metadata."""
sanitized = tarfile.TarInfo()
sanitized.name = original.name
sanitized.size = original.size
sanitized.mtime = mtime
sanitized.mode = (original.mode & ~(stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)) | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
sanitized.type = original.type
sanitized.linkname = original.linkname
sanitized.uid = 0
sanitized.gid = 0
sanitized.uname = "root"
sanitized.gname = "root"
if original.mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
sanitized.mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return sanitized
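# Worked example of the mode sanitization above: all permission bits are cleared,
# read-for-all plus write-for-owner are set, and execute-for-all is restored only
# if the original had any execute bit.
#
#   0o755 (rwxr-xr-x) -> 0o755   # executables stay executable for everyone
#   0o600 (rw-------) -> 0o644   # everything else becomes rw-r--r--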
def test_built_artifact(path: pathlib.Path) -> None:
"""Test the specified built artifact by installing it in a venv and running some basic commands."""
with tempfile.TemporaryDirectory() as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
venv_dir = temp_dir / "venv"
venv_bin_dir = venv_dir / "bin"
venv.create(venv_dir, with_pip=True)
env = os.environ.copy()
env.pop("PYTHONPATH", None) # avoid interference from ansible being injected into the environment
env.update(
PATH=os.pathsep.join((str(venv_bin_dir), env["PATH"])),
)
run("pip", "install", path, env=env | PIP_ENV, cwd=CHECKOUT_DIR)
run("ansible", "--version", env=env, cwd=CHECKOUT_DIR)
run("ansible-test", "--version", env=env, cwd=CHECKOUT_DIR)
def get_sdist_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path:
"""Return the path to the sdist file."""
return dist_dir / f"ansible_core-{version}.tar.gz"
def get_wheel_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path:
"""Return the path to the wheel file."""
return dist_dir / f"ansible_core-{version}-py3-none-any.whl"
def calculate_digest(path: pathlib.Path) -> str:
"""Return the digest for the specified file."""
with open(path, "rb") as f:
digest = hashlib.file_digest(f, DIGEST_ALGORITHM)
return digest.hexdigest()
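# hashlib.file_digest (used above) streams the file in chunks rather than reading it
# into memory all at once; note it requires Python 3.11 or later.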
@functools.cache
def get_release_artifact_details(repository: str, version: Version, validate: bool) -> list[ReleaseArtifact]:
"""Return information about the release artifacts hosted on PyPI."""
data = get_pypi_project(repository, 'ansible-core', version)
artifacts = [describe_release_artifact(version, item, validate) for item in data["urls"]]
expected_artifact_types = {"bdist_wheel", "sdist"}
found_artifact_types = set(artifact.package_type for artifact in artifacts)
if found_artifact_types != expected_artifact_types:
raise RuntimeError(f"Expected {expected_artifact_types} artifact types, but found {found_artifact_types} instead.")
return artifacts
def describe_release_artifact(version: Version, item: dict[str, t.Any], validate: bool) -> ReleaseArtifact:
"""Return release artifact details extracted from the given PyPI data."""
package_type = item["packagetype"]
# The artifact URL is documented as stable, so is safe to put in release notes and announcements.
# See: https://github.com/pypi/warehouse/blame/c95be4a1055f4b36a8852715eb80318c81fc00ca/docs/api-reference/integration-guide.rst#L86-L90
url = item["url"]
pypi_size = item["size"]
pypi_digest = item["digests"][DIGEST_ALGORITHM]
if package_type == "bdist_wheel":
local_artifact_file = get_wheel_path(version)
package_label = "Built Distribution"
elif package_type == "sdist":
local_artifact_file = get_sdist_path(version)
package_label = "Source Distribution"
else:
raise NotImplementedError(f"Package type '{package_type}' is not supported.")
if validate:
try:
local_size = local_artifact_file.stat().st_size
local_digest = calculate_digest(local_artifact_file)
except FileNotFoundError:
raise ApplicationError(f"Missing local artifact: {local_artifact_file.relative_to(CHECKOUT_DIR)}") from None
if local_size != pypi_size:
raise ApplicationError(f"The {version} local {package_type} size {local_size} does not match the PyPI size {pypi_size}.")
if local_digest != pypi_digest:
raise ApplicationError(f"The {version} local {package_type} digest '{local_digest}' does not match the PyPI digest '{pypi_digest}'.")
return ReleaseArtifact(
package_type=package_type,
package_label=package_label,
url=url,
size=pypi_size,
digest=pypi_digest,
digest_algorithm=DIGEST_ALGORITHM.upper(),
)
def get_next_release_date(start: datetime.date, step: int, after: datetime.date) -> datetime.date:
"""Return the next release date."""
if start > after:
raise ValueError(f"{start=} is greater than {after=}")
current_delta = after - start
release_delta = datetime.timedelta(days=(math.floor(current_delta.days / step) + 1) * step)
release = start + release_delta
return release
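# Worked example of the date arithmetic above (hypothetical inputs): with
# start=2021-08-09, step=28 and after=2021-09-01, current_delta is 23 days,
# floor(23 / 28) + 1 == 1, so the next release is 28 days after start: 2021-09-06.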
def create_template_environment() -> jinja2.Environment:
"""Create and return a jinja2 environment."""
env = jinja2.Environment()
env.filters.update(
basename=os.path.basename,
)
return env
def create_github_release_notes(upstream: Remote, repository: str, version: Version, validate: bool) -> str:
"""Create and return GitHub release notes."""
env = create_template_environment()
template = env.from_string(GITHUB_RELEASE_NOTES_TEMPLATE)
variables = dict(
version=version,
releases=get_release_artifact_details(repository, version, validate),
changelog=f"https://github.com/{upstream.user}/{upstream.repo}/blob/v{version}/changelogs/CHANGELOG-v{version.major}.{version.minor}.rst",
)
release_notes = template.render(**variables).strip()
return release_notes
def create_release_announcement(upstream: Remote, repository: str, version: Version, validate: bool) -> ReleaseAnnouncement:
"""Create and return a release announcement message."""
env = create_template_environment()
subject_template = env.from_string(RELEASE_ANNOUNCEMENT_SUBJECT_TEMPLATE)
body_template = env.from_string(RELEASE_ANNOUNCEMENT_BODY_TEMPLATE)
today = datetime.datetime.now(tz=datetime.timezone.utc).date()
variables = dict(
version=version,
info=dict(
name="ansible-core",
short=f"{version.major}.{version.minor}",
releases=get_release_artifact_details(repository, version, validate),
),
next_rc=get_next_release_date(datetime.date(2021, 8, 9), 28, today),
next_ga=get_next_release_date(datetime.date(2021, 8, 16), 28, today),
rc=version.pre and version.pre[0] == "rc",
beta=version.pre and version.pre[0] == "b",
alpha=version.pre and version.pre[0] == "a",
major=version.micro == 0,
upstream=upstream,
)
if version.pre and version.pre[0] in ("a", "b"):
display.warning("The release announcement template does not populate the date for the next release.")
subject = subject_template.render(**variables).strip()
body = body_template.render(**variables).strip()
message = ReleaseAnnouncement(
subject=subject,
body=body,
)
return message
# endregion
# region Templates
FINAL_RELEASE_ANNOUNCEMENT_RECIPIENTS = [
"ansible-announce@googlegroups.com",
"ansible-project@googlegroups.com",
"ansible-devel@googlegroups.com",
]
PRE_RELEASE_ANNOUNCEMENT_RECIPIENTS = [
"ansible-devel@googlegroups.com",
]
GITHUB_RELEASE_NOTES_TEMPLATE = """
# Changelog
See the [full changelog]({{ changelog }}) for the changes included in this release.
# Release Artifacts
{%- for release in releases %}
* {{ release.package_label }}: [{{ release.url|basename }}]({{ release.url }}) - {{ release.size }} bytes
* {{ release.digest }} ({{ release.digest_algorithm }})
{%- endfor %}
"""
# These release templates were adapted from sivel's release announcement script.
# See: https://gist.github.com/sivel/937bc2862a9677d8db875f3b10744d8c
RELEASE_ANNOUNCEMENT_SUBJECT_TEMPLATE = """
New release{% if rc %} candidate{% elif beta %} beta{% elif alpha %} alpha{% endif %}: {{ info.name }} {{ version }}
"""
# NOTE: Gmail will automatically wrap the plain text version when sending.
# There's no need to perform wrapping ahead of time for normal sentences.
# However, lines with special formatting should be kept short to avoid unwanted wrapping.
RELEASE_ANNOUNCEMENT_BODY_TEMPLATE = """
Hi all- we're happy to announce the{{ " " }}
{%- if rc -%}
following release candidate
{%- elif beta -%}
beta release of
{%- elif alpha -%}
alpha release of
{%- else -%}
general release of
{%- endif -%}:
{{ info.name }} {{ version }}
How to get it
-------------
$ python3 -m pip install --user {{ info.name }}=={{ version }}
The release artifacts can be found here:
{% for release in info.releases %}
# {{ release.package_label }}: {{ release.size }} bytes
# {{ release.digest_algorithm }}: {{ release.digest }}
{{ release.url }}
{%- endfor %}
What's new
----------
{% if major %}
This release is a major release.
{%- else -%}
This release is a maintenance release containing numerous bugfixes.
{% endif %}
The full changelog can be found here:
https://github.com/{{ upstream.user }}/{{ upstream.repo }}/blob/v{{ version }}/changelogs/CHANGELOG-v{{ info.short }}.rst
Schedule for future releases
----------------------------
{% if rc %}
The release candidate will become a general availability release on {{ next_ga.strftime('%-d %B %Y') }}.
{% elif beta %}
Subject to the need for additional beta releases, the first release candidate is scheduled for X.
{% elif alpha %}
Subject to the need for additional alpha releases, the first beta release is scheduled for X.
{% else %}
The next release candidate is planned to be released on {{ next_rc.strftime('%-d %B %Y') }}. The next general availability release will be one week after.
{% endif %}
Porting help
------------
If you discover any errors or if any of your working playbooks break when you upgrade, please use the following link to report the regression:
https://github.com/{{ upstream.user }}/{{ upstream.repo }}/issues/new/choose
In your issue, be sure to mention the version that works and the one that doesn't.
Thanks!
"""
# endregion
# region Commands
command = CommandFramework(
repository=dict(metavar="REPO", choices=tuple(PYPI_ENDPOINTS), default="pypi", help="PyPI repository to use: %(choices)s [%(default)s]"),
version=dict(exclusive="version", help="version to set"),
pre=dict(exclusive="version", help="increment version to the specified pre-release (aN, bN, rcN)"),
final=dict(exclusive="version", action="store_true", help="increment version to the next final release"),
commit=dict(help="commit to tag"),
mailto=dict(name="--mailto", action="store_true", help="write announcement to mailto link instead of console"),
validate=dict(name="--no-validate", action="store_false", help="disable validation of PyPI artifacts against local ones"),
prompt=dict(name="--no-prompt", action="store_false", help="disable interactive prompt before publishing with twine"),
setuptools=dict(name='--no-setuptools', action="store_false", help="disable updating setuptools upper bound"),
allow_tag=dict(action="store_true", help="allow an existing release tag (for testing)"),
allow_stale=dict(action="store_true", help="allow a stale checkout (for testing)"),
allow_dirty=dict(action="store_true", help="allow untracked files and files with changes (for testing)"),
)
@command
def instructions() -> None:
"""Show instructions for the release process."""
message = """
Releases must be performed using an up-to-date checkout of a fork of the Ansible repository.
1. Make sure your checkout is up-to-date.
2. Run the `prepare` command [1], then:
a. Submit the PR opened in the browser.
b. Wait for CI to pass.
c. Merge the PR.
3. Update your checkout to include the commit from the PR which was just merged.
4. Run the `complete` command [2], then:
a. Submit the GitHub release opened in the browser.
b. Submit the PR opened in the browser.
c. Send the release announcement opened in your browser.
d. Wait for CI to pass.
e. Merge the PR.
[1] Use the `--final`, `--pre` or `--version` option for control over the version.
[2] During the `publish` step, `twine` may prompt for credentials.
"""
display.show(message.strip())
@command
def show_version(final: bool = False, pre: str | None = None) -> None:
"""Show the current and next ansible-core version."""
current_version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
display.show(f"Current version: {current_version}")
try:
next_version = get_next_version(current_version, final=final, pre=pre)
except ApplicationError as ex:
display.show(f" Next version: Unknown - {ex}")
else:
display.show(f" Next version: {next_version}")
check_ansible_version(current_version, next_version)
@command
def check_state(allow_stale: bool = False) -> None:
"""Verify the git repository is in a usable state for creating a pull request."""
get_git_state(get_ansible_version(), allow_stale)
# noinspection PyUnusedLocal
@command
def prepare(final: bool = False, pre: str | None = None, version: str | None = None, setuptools: bool | None = None) -> None:
"""Prepare a release."""
command.run(
update_version,
update_setuptools,
check_state,
generate_summary,
generate_changelog,
create_release_pr,
)
@command
def update_version(final: bool = False, pre: str | None = None, version: str | None = None) -> None:
"""Update the version embedded in the source code."""
current_version = get_ansible_version(mode=VersionMode.REQUIRE_DEV_POST)
if version:
requested_version = get_ansible_version(version)
else:
requested_version = get_next_version(current_version, final=final, pre=pre)
set_ansible_version(current_version, requested_version)
@command
def update_setuptools(setuptools: bool) -> None:
"""Update the setuptools upper bound in pyproject.toml."""
if not setuptools:
return
requested_version = get_latest_setuptools_version()
set_setuptools_upper_bound(requested_version)
@command
def generate_summary() -> None:
"""Generate a summary changelog fragment for this release."""
version = get_ansible_version()
release_date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d")
summary_path = CHANGELOGS_FRAGMENTS_DIR / f"{version}_summary.yaml"
major_minor = f"{version.major}.{version.minor}"
content = f"""
release_summary: |
| Release Date: {release_date}
| `Porting Guide <https://docs.ansible.com/ansible-core/{major_minor}/porting_guides/porting_guide_core_{major_minor}.html>`__
"""
summary_path.write_text(content.lstrip())
@command
def generate_changelog() -> None:
"""Generate the changelog and validate the results."""
env = ensure_venv()
env.update(
PATH=os.pathsep.join((str(ANSIBLE_BIN_DIR), env["PATH"])),
PYTHONPATH=ANSIBLE_LIB_DIR,
)
# TODO: consider switching back to the original changelog generator instead of using antsibull-changelog
run("antsibull-changelog", "release", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR)
run("antsibull-changelog", "generate", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR)
run("ansible-test", "sanity", CHANGELOGS_DIR, ANSIBLE_RELEASE_FILE, env=env, cwd=CHECKOUT_DIR)
@command
def create_release_pr(allow_stale: bool = False) -> None:
"""Create a branch and open a browser tab for creating a release pull request."""
version = get_ansible_version()
pr = prepare_pull_request(
version=version,
branch=f"release-{version}-{secrets.token_hex(4)}",
title=f"New release v{version}",
add=(
CHANGELOGS_DIR,
ANSIBLE_RELEASE_FILE,
ANSIBLE_PYPROJECT_TOML_FILE,
),
allow_stale=allow_stale,
)
create_pull_request(pr)
# noinspection PyUnusedLocal
@command
def complete(repository: str, mailto: bool = True, allow_dirty: bool = False) -> None:
"""Complete a release after the prepared changes have been merged."""
command.run(
check_state,
build,
test,
publish,
tag_release,
post_version,
create_post_pr,
release_announcement,
)
@command
def build(allow_dirty: bool = False) -> None:
"""Build the sdist and wheel."""
version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
env = ensure_venv()
dirty = git("status", "--porcelain", "--untracked-files=all", capture_output=True).stdout.strip().splitlines()
if dirty:
with suppress_when(allow_dirty):
raise ApplicationError(f"There are {len(dirty)} files which are untracked and/or have changes, which will be omitted from the build.")
sdist_file = get_sdist_path(version)
wheel_file = get_wheel_path(version)
with tempfile.TemporaryDirectory(dir=DIST_DIR, prefix=f"build-{version}-", suffix=".tmp") as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
dist_dir = temp_dir / "dist"
commit_time = int(git("show", "-s", "--format=%ct", capture_output=True).stdout)
env.update(
SOURCE_DATE_EPOCH=commit_time,
)
git("worktree", "add", "-d", temp_dir)
try:
run("python", "-m", "build", env=env, cwd=temp_dir)
create_reproducible_sdist(get_sdist_path(version, dist_dir), sdist_file, commit_time)
get_wheel_path(version, dist_dir).rename(wheel_file)
finally:
git("worktree", "remove", temp_dir)
@command
def test() -> None:
"""Test the sdist and wheel."""
command.run(
test_sdist,
test_wheel,
)
@command
def test_sdist() -> None:
"""Test the sdist."""
version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
sdist_file = get_sdist_path(version)
with tempfile.TemporaryDirectory() as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
with contextlib.ExitStack() as stack:
try:
sdist = stack.enter_context(tarfile.open(sdist_file))
except FileNotFoundError:
raise ApplicationError(f"Missing sdist: {sdist_file.relative_to(CHECKOUT_DIR)}") from None
# deprecated: description='extractall fallback without filter' python_version='3.11'
if hasattr(tarfile, 'data_filter'):
sdist.extractall(temp_dir, filter='data') # type: ignore[call-arg]
else:
sdist.extractall(temp_dir)
pyc_glob = "*.pyc*"
pyc_files = sorted(path.relative_to(temp_dir) for path in temp_dir.rglob(pyc_glob))
if pyc_files:
raise ApplicationError(f"Found {len(pyc_files)} '{pyc_glob}' file(s): {', '.join(map(str, pyc_files))}")
test_built_artifact(sdist_file)
@command
def test_wheel() -> None:
"""Test the wheel."""
version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
wheel_file = get_wheel_path(version)
with tempfile.TemporaryDirectory() as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
with contextlib.ExitStack() as stack:
try:
wheel = stack.enter_context(zipfile.ZipFile(wheel_file))
except FileNotFoundError:
raise ApplicationError(f"Missing wheel for version {version}: {wheel_file}") from None
wheel.extractall(temp_dir)
test_built_artifact(wheel_file)
@command
def publish(repository: str, prompt: bool = True) -> None:
"""Publish to PyPI."""
version = get_ansible_version()
sdist_file = get_sdist_path(version)
wheel_file = get_wheel_path(version)
env = ensure_venv()
if prompt:
try:
while input(f"Do you want to publish {version} to the '{repository}' repository?\nEnter the repository name to confirm: ") != repository:
pass
except KeyboardInterrupt:
display.show("")
raise ApplicationError("Publishing was aborted by the user.") from None
run("twine", "upload", "-r", repository, sdist_file, wheel_file, env=env, cwd=CHECKOUT_DIR)
@command
def tag_release(repository: str, commit: str | None = None, validate: bool = True, allow_tag: bool = False) -> None:
"""Create a GitHub release using the current or specified commit."""
upstream = get_remotes().upstream
if commit:
git("fetch", upstream.name) # fetch upstream to make sure the commit can be found
commit = get_commit(commit)
version = get_ansible_version(commit=commit)
tag = f"v{version}"
if upstream_tag := git("ls-remote", "--tags", upstream.name, tag, capture_output=True).stdout.strip():
with suppress_when(allow_tag):
raise ApplicationError(f"Version {version} has already been tagged: {upstream_tag}")
upstream_branch = get_upstream_branch(version)
upstream_refs = git("branch", "-r", "--format=%(refname)", "--contains", commit, capture_output=True).stdout.strip().splitlines()
upstream_ref = f"refs/remotes/{upstream.name}/{upstream_branch}"
if upstream_ref not in upstream_refs:
raise ApplicationError(f"Commit {upstream_ref} not found. Found {len(upstream_refs)} upstream ref(s): {', '.join(upstream_refs)}")
body = create_github_release_notes(upstream, repository, version, validate)
release = GitHubRelease(
user=upstream.user,
repo=upstream.repo,
target=commit,
tag=tag,
title=tag,
body=body,
pre_release=version.pre is not None,
)
create_github_release(release)
@command
def post_version() -> None:
"""Set the post release version."""
current_version = get_ansible_version()
requested_version = get_ansible_version(f"{current_version}.post0", mode=VersionMode.REQUIRE_POST)
set_ansible_version(current_version, requested_version)
@command
def create_post_pr(allow_stale: bool = False) -> None:
"""Create a branch and open a browser tab for creating a post release pull request."""
version = get_ansible_version(mode=VersionMode.REQUIRE_POST)
pr = prepare_pull_request(
version=version,
branch=f"release-{version}-{secrets.token_hex(4)}",
title=f"Update Ansible release version to v{version}.",
add=(ANSIBLE_RELEASE_FILE,),
allow_stale=allow_stale,
)
create_pull_request(pr)
@command
def release_announcement(repository: str, version: str | None = None, mailto: bool = True, validate: bool = True) -> None:
"""Generate a release announcement for the current or specified version."""
parsed_version = get_ansible_version(version, mode=VersionMode.STRIP_POST)
upstream = get_remotes().upstream
message = create_release_announcement(upstream, repository, parsed_version, validate)
recipient_list = PRE_RELEASE_ANNOUNCEMENT_RECIPIENTS if parsed_version.is_prerelease else FINAL_RELEASE_ANNOUNCEMENT_RECIPIENTS
recipients = ", ".join(recipient_list)
if mailto:
to = urllib.parse.quote(recipients)
params = dict(
subject=message.subject,
body=message.body,
)
query_string = urllib.parse.urlencode(params)
url = f"mailto:{to}?{query_string}"
display.show("Opening email client through default web browser ...")
webbrowser.open(url)
else:
print(f"TO: {recipients}")
print(f"SUBJECT: {message.subject}")
print()
print(message.body)
# endregion
if __name__ == "__main__":
command.main()
| 53,871 | Python | .py | 1,131 | 41.22458 | 190 | 0.679004 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,484 | build.py | ansible_ansible/packaging/cli-doc/build.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Build documentation for ansible-core CLI programs."""
from __future__ import annotations
import argparse
import dataclasses
import importlib
import inspect
import io
import itertools
import json
import pathlib
import sys
import typing as t
import warnings
import jinja2
if t.TYPE_CHECKING:
from ansible.cli import CLI # pragma: nocover
SCRIPT_DIR = pathlib.Path(__file__).resolve().parent
SOURCE_DIR = SCRIPT_DIR.parent.parent
def main() -> None:
"""Main program entry point."""
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(required=True, metavar='command')
man_parser = subparsers.add_parser('man', description=build_man.__doc__, help=build_man.__doc__)
man_parser.add_argument('--output-dir', required=True, type=pathlib.Path, metavar='DIR', help='output directory')
man_parser.add_argument('--template-file', default=SCRIPT_DIR / 'man.j2', type=pathlib.Path, metavar='FILE', help='template file')
man_parser.set_defaults(func=build_man)
rst_parser = subparsers.add_parser('rst', description=build_rst.__doc__, help=build_rst.__doc__)
rst_parser.add_argument('--output-dir', required=True, type=pathlib.Path, metavar='DIR', help='output directory')
rst_parser.add_argument('--template-file', default=SCRIPT_DIR / 'rst.j2', type=pathlib.Path, metavar='FILE', help='template file')
rst_parser.set_defaults(func=build_rst)
json_parser = subparsers.add_parser('json', description=build_json.__doc__, help=build_json.__doc__)
json_parser.add_argument('--output-file', required=True, type=pathlib.Path, metavar='FILE', help='output file')
json_parser.set_defaults(func=build_json)
try:
# noinspection PyUnresolvedReferences
import argcomplete
except ImportError:
pass
else:
argcomplete.autocomplete(parser)
args = parser.parse_args()
kwargs = {name: getattr(args, name) for name in inspect.signature(args.func).parameters}
sys.path.insert(0, str(SOURCE_DIR / 'lib'))
args.func(**kwargs)
def build_man(output_dir: pathlib.Path, template_file: pathlib.Path) -> None:
"""Build man pages for ansible-core CLI programs."""
if not template_file.resolve().is_relative_to(SCRIPT_DIR):
warnings.warn("Custom templates are intended for debugging purposes only. The data model may change in future releases without notice.")
import docutils.core
import docutils.writers.manpage
output_dir.mkdir(exist_ok=True, parents=True)
for cli_name, source in generate_rst(template_file).items():
with io.StringIO(source) as source_file:
docutils.core.publish_file(
source=source_file,
destination_path=output_dir / f'{cli_name}.1',
writer=docutils.writers.manpage.Writer(),
)
def build_rst(output_dir: pathlib.Path, template_file: pathlib.Path) -> None:
"""Build RST documentation for ansible-core CLI programs."""
if not template_file.resolve().is_relative_to(SCRIPT_DIR):
warnings.warn("Custom templates are intended for debugging purposes only. The data model may change in future releases without notice.")
output_dir.mkdir(exist_ok=True, parents=True)
for cli_name, source in generate_rst(template_file).items():
(output_dir / f'{cli_name}.rst').write_text(source)
def build_json(output_file: pathlib.Path) -> None:
"""Build JSON documentation for ansible-core CLI programs."""
warnings.warn("JSON output is intended for debugging purposes only. The data model may change in future releases without notice.")
output_file.parent.mkdir(exist_ok=True, parents=True)
output_file.write_text(json.dumps(collect_programs(), indent=4))
def generate_rst(template_file: pathlib.Path) -> dict[str, str]:
"""Generate RST pages using the provided template."""
results: dict[str, str] = {}
for cli_name, template_vars in collect_programs().items():
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_file.parent))
template = env.get_template(template_file.name)
results[cli_name] = template.render(template_vars)
return results
def collect_programs() -> dict[str, dict[str, t.Any]]:
"""Return information about CLI programs."""
programs: list[tuple[str, dict[str, t.Any]]] = []
cli_bin_name_list: list[str] = []
for source_file in (SOURCE_DIR / 'lib/ansible/cli').glob('*.py'):
if source_file.name != '__init__.py':
programs.append(generate_options_docs(source_file, cli_bin_name_list))
return dict(programs)
def generate_options_docs(source_file: pathlib.Path, cli_bin_name_list: list[str]) -> tuple[str, dict[str, t.Any]]:
"""Generate doc structure from CLI module options."""
import ansible.release
if str(source_file).endswith('/lib/ansible/cli/adhoc.py'):
cli_name = 'ansible'
cli_class_name = 'AdHocCLI'
cli_module_fqn = 'ansible.cli.adhoc'
else:
cli_module_name = source_file.with_suffix('').name
cli_name = f'ansible-{cli_module_name}'
cli_class_name = f'{cli_module_name.capitalize()}CLI'
cli_module_fqn = f'ansible.cli.{cli_module_name}'
cli_bin_name_list.append(cli_name)
cli_module = importlib.import_module(cli_module_fqn)
cli_class: type[CLI] = getattr(cli_module, cli_class_name)
cli = cli_class([cli_name])
cli.init_parser()
parser: argparse.ArgumentParser = cli.parser
long_desc = cli.__doc__
arguments: dict[str, str] | None = getattr(cli, 'ARGUMENTS', None)
action_docs = get_action_docs(parser)
option_names: tuple[str, ...] = tuple(itertools.chain.from_iterable(opt.options for opt in action_docs))
actions: dict[str, dict[str, t.Any]] = {}
content_depth = populate_subparser_actions(parser, option_names, actions)
docs = dict(
version=ansible.release.__version__,
source=str(source_file.relative_to(SOURCE_DIR)),
cli_name=cli_name,
usage=parser.format_usage(),
short_desc=parser.description,
long_desc=trim_docstring(long_desc),
actions=actions,
options=[item.__dict__ for item in action_docs],
arguments=arguments,
option_names=option_names,
cli_bin_name_list=cli_bin_name_list,
content_depth=content_depth,
inventory='-i' in option_names,
library='-M' in option_names,
)
return cli_name, docs
def populate_subparser_actions(parser: argparse.ArgumentParser, shared_option_names: tuple[str, ...], actions: dict[str, dict[str, t.Any]]) -> int:
"""Generate doc structure from CLI module subparser options."""
try:
# noinspection PyProtectedMember
subparsers: dict[str, argparse.ArgumentParser] = parser._subparsers._group_actions[0].choices # type: ignore
except AttributeError:
subparsers = {}
depth = 0
for subparser_action, subparser in subparsers.items():
subparser_option_names: set[str] = set()
subparser_action_docs: set[ActionDoc] = set()
subparser_actions: dict[str, dict[str, t.Any]] = {}
for action_doc in get_action_docs(subparser):
for option_alias in action_doc.options:
if option_alias in shared_option_names:
continue
subparser_option_names.add(option_alias)
subparser_action_docs.add(action_doc)
depth = populate_subparser_actions(subparser, shared_option_names, subparser_actions)
actions[subparser_action] = dict(
option_names=list(subparser_option_names),
options=[item.__dict__ for item in subparser_action_docs],
actions=subparser_actions,
name=subparser_action,
desc=trim_docstring(subparser.get_default("func").__doc__),
)
return depth + 1
@dataclasses.dataclass(frozen=True)
class ActionDoc:
"""Documentation for an action."""
desc: str | None
options: tuple[str, ...]
arg: str | None
def get_action_docs(parser: argparse.ArgumentParser) -> list[ActionDoc]:
"""Get action documentation from the given argument parser."""
action_docs = []
# noinspection PyProtectedMember
for action in parser._actions:
if action.help == argparse.SUPPRESS:
continue
# noinspection PyProtectedMember, PyUnresolvedReferences
args = action.dest.upper() if isinstance(action, argparse._StoreAction) else None
if args or action.option_strings:
action_docs.append(ActionDoc(
desc=action.help,
options=tuple(action.option_strings),
arg=args,
))
return action_docs
def trim_docstring(docstring: str | None) -> str:
"""Trim and return the given docstring using the implementation from https://peps.python.org/pep-0257/#handling-docstring-indentation."""
if not docstring:
return '' # pragma: nocover
# Convert tabs to spaces (following the normal Python rules) and split into a list of lines
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count)
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special)
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string
return '\n'.join(trimmed)
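# Illustration of the trimming rules above (comment only, not used by the build):
#
#   >>> trim_docstring("First line.\n\n        Indented body.\n            Deeper.\n    ")
#   'First line.\n\nIndented body.\n    Deeper.'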
if __name__ == '__main__':
main()
| 9,918 | Python | .py | 204 | 41.681373 | 147 | 0.680984 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,485 | 83643-fix-sanity-ignore-for-copy.yml | ansible_ansible/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml |
minor_changes:
- copy - parameter ``local_follow`` was incorrectly documented as having default value ``True`` (https://github.com/ansible/ansible/pull/83643).
- copy - fix sanity test failures (https://github.com/ansible/ansible/pull/83643).
| 247 | Python | .py | 3 | 80 | 146 | 0.75 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,486 | ansible-test-pylint-fix.yml | ansible_ansible/changelogs/fragments/ansible-test-pylint-fix.yml |
bugfixes:
- ansible-test - Enable the ``sys.unraisablehook`` work-around for the ``pylint`` sanity test on Python 3.11.
Previously the work-around was only enabled for Python 3.12 and later.
However, the same issue has been discovered on Python 3.11.
| 261 | Python | .py | 4 | 61.75 | 111 | 0.747082 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,487 | return_skeleton_generator.py | ansible_ansible/hacking/return_skeleton_generator.py |
#!/usr/bin/env python
# (c) 2017, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# return_skeleton_generator.py takes JSON output from a module and
# creates a starting point for the RETURNS section of a module.
# This can be provided as stdin or a file argument
#
# The easiest way to obtain the JSON output is to use hacking/test-module.py
#
# You will likely want to adjust this to remove sensitive data or
# ensure the `returns` value is correct, and to write a useful description
from __future__ import annotations
from collections import OrderedDict
import json
import sys
import yaml
# Allow OrderedDicts to be used when dumping YAML
# https://stackoverflow.com/a/16782282/3538079
def represent_ordereddict(dumper, data):
value = []
for item_key, item_value in data.items():
node_key = dumper.represent_data(item_key)
node_value = dumper.represent_data(item_value)
value.append((node_key, node_value))
return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
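# Once registered (see main() below), the representer lets safe_dump emit an
# OrderedDict as a plain mapping while preserving insertion order, for example:
#
#   >>> yaml.representer.SafeRepresenter.add_representer(OrderedDict, represent_ordereddict)
#   >>> yaml.safe_dump(OrderedDict([('b', 1), ('a', 2)]), default_flow_style=False)
#   'b: 1\na: 2\n'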
def get_return_data(key, value):
# The OrderedDict here is so that, for complex objects, the
# summary data is at the top before the contains information
returns_info = {key: OrderedDict()}
returns_info[key]['description'] = "FIXME *** add description for %s" % key
returns_info[key]['returned'] = "always"
if isinstance(value, dict):
returns_info[key]['type'] = 'complex'
returns_info[key]['contains'] = get_all_items(value)
elif isinstance(value, list) and value and isinstance(value[0], dict):
returns_info[key]['type'] = 'complex'
returns_info[key]['contains'] = get_all_items(value[0])
else:
returns_info[key]['type'] = type(value).__name__
returns_info[key]['sample'] = value
# override python unicode type to set to string for docs
if returns_info[key]['type'] == 'unicode':
returns_info[key]['type'] = 'str'
return returns_info
def get_all_items(data):
    # sort the single-key dicts returned by get_return_data by their key so the
    # generated documentation is deterministic (dicts themselves are not orderable)
    items = sorted((get_return_data(key, value) for key, value in data.items()), key=lambda item: next(iter(item)))
    result = OrderedDict()
    for item in items:
        key, value = next(iter(item.items()))  # dict views are not indexable in Python 3
        result[key] = value
    return result
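# Example of the skeleton this produces (illustrative): input {"changed": false}
# yields a RETURNS entry like:
#
#   changed:
#     description: FIXME *** add description for changed
#     returned: always
#     type: bool
#     sample: false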
def main(args):
yaml.representer.SafeRepresenter.add_representer(OrderedDict, represent_ordereddict)
if args:
src = open(args[0])
else:
src = sys.stdin
data = json.load(src, strict=False)
docs = get_all_items(data)
if 'invocation' in docs:
del docs['invocation']
print(yaml.safe_dump(docs, default_flow_style=False))
if __name__ == '__main__':
main(sys.argv[1:])
| 3,276 | Python | .py | 78 | 37.730769 | 88 | 0.704939 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,488 | test-module.py | ansible_ansible/hacking/test-module.py |
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# this script is for testing modules without running through the
# entire guts of ansible, and is very helpful for when developing
# modules
#
# example:
# ./hacking/test-module.py -m lib/ansible/modules/command.py -a "/bin/sleep 3"
# ./hacking/test-module.py -m lib/ansible/modules/command.py -a "/bin/sleep 3" --debugger /usr/bin/pdb
# ./hacking/test-module.py -m lib/ansible/modules/lineinfile.py -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check
# ./hacking/test-module.py -m lib/ansible/modules/command.py -a "echo hello" -n -o "test_hello"
from __future__ import annotations
import glob
import optparse
import os
import subprocess
import sys
import traceback
import shutil
from pathlib import Path
from ansible.release import __version__
import ansible.utils.vars as utils_vars
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.utils.jsonify import jsonify
from ansible.parsing.splitter import parse_kv
from ansible.plugins.loader import init_plugin_loader
from ansible.executor import module_common
import ansible.constants as C
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.template import Templar
import json
def parse():
"""parse command line
:return : (options, args)"""
parser = optparse.OptionParser()
parser.usage = "%prog -[options] (-h for help)"
parser.add_option('-m', '--module-path', dest='module_path',
help="REQUIRED: full path of module source to execute")
parser.add_option('-a', '--args', dest='module_args', default="",
help="module argument string")
parser.add_option('-D', '--debugger', dest='debugger',
help="path to python debugger (e.g. /usr/bin/pdb)")
parser.add_option('-I', '--interpreter', dest='interpreter',
help="path to interpreter to use for this module"
" (e.g. ansible_python_interpreter=/usr/bin/python)",
metavar='INTERPRETER_TYPE=INTERPRETER_PATH',
default="ansible_python_interpreter=%s" %
(sys.executable if sys.executable else '/usr/bin/python'))
parser.add_option('-c', '--check', dest='check', action='store_true',
help="run the module in check mode")
parser.add_option('-n', '--noexecute', dest='execute', action='store_false',
default=True, help="do not run the resulting module")
parser.add_option('-o', '--output', dest='filename',
help="Filename for resulting module",
default="~/.ansible_module_generated")
options, args = parser.parse_args()
if not options.module_path:
parser.print_help()
sys.exit(1)
else:
return options, args
def write_argsfile(argstring, json=False):
""" Write args to a file for old-style module's use. """
argspath = Path("~/.ansible_test_module_arguments").expanduser()
if json:
args = parse_kv(argstring)
argstring = jsonify(args)
argspath.write_text(argstring)
return argspath
def get_interpreters(interpreter):
result = dict()
if interpreter:
if '=' not in interpreter:
print("interpreter must by in the form of ansible_python_interpreter=/usr/bin/python")
sys.exit(1)
interpreter_type, interpreter_path = interpreter.split('=')
if not interpreter_type.startswith('ansible_'):
interpreter_type = 'ansible_%s' % interpreter_type
if not interpreter_type.endswith('_interpreter'):
interpreter_type = '%s_interpreter' % interpreter_type
result[interpreter_type] = interpreter_path
return result
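# Normalization examples for the --interpreter option handled above: the key is
# padded to the ansible_*_interpreter form when either affix is missing.
#
#   python=/usr/bin/python3                     -> {'ansible_python_interpreter': '/usr/bin/python3'}
#   ansible_python_interpreter=/usr/bin/python3 -> {'ansible_python_interpreter': '/usr/bin/python3'}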
def boilerplate_module(modfile, args, interpreters, check, destfile):
""" simulate what ansible does with new style modules """
# module_fh = open(modfile)
# module_data = module_fh.read()
# module_fh.close()
# replacer = module_common.ModuleReplacer()
loader = DataLoader()
# included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1
complex_args = {}
# default selinux fs list is pass in as _ansible_selinux_special_fs arg
complex_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
complex_args['_ansible_tmpdir'] = C.DEFAULT_LOCAL_TMP
complex_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
complex_args['_ansible_version'] = __version__
if args.startswith("@"):
# Argument is a YAML file (JSON is a subset of YAML)
complex_args = utils_vars.combine_vars(complex_args, loader.load_from_file(args[1:]))
args = ''
elif args.startswith("{"):
# Argument is a YAML document (not a file)
complex_args = utils_vars.combine_vars(complex_args, loader.load(args))
args = ''
if args:
parsed_args = parse_kv(args)
complex_args = utils_vars.combine_vars(complex_args, parsed_args)
task_vars = interpreters
if check:
complex_args['_ansible_check_mode'] = True
modname = os.path.basename(modfile)
modname = os.path.splitext(modname)[0]
(module_data, module_style, shebang) = module_common.modify_module(
modname,
modfile,
complex_args,
Templar(loader=loader),
task_vars=task_vars
)
if module_style == 'new' and '_ANSIBALLZ_WRAPPER = True' in to_native(module_data):
module_style = 'ansiballz'
modfile2_path = os.path.expanduser(destfile)
print("* including generated source, if any, saving to: %s" % modfile2_path)
if module_style not in ('ansiballz', 'old'):
print("* this may offset any line numbers in tracebacks/debuggers!")
with open(modfile2_path, 'wb') as modfile2:
modfile2.write(module_data)
modfile = modfile2_path
return (modfile2_path, modname, module_style)
def ansiballz_setup(modfile, modname, interpreters):
os.system("chmod +x %s" % modfile)
if 'ansible_python_interpreter' in interpreters:
command = [interpreters['ansible_python_interpreter']]
else:
command = []
command.extend([modfile, 'explode'])
cmd = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out, err = to_text(out, errors='surrogate_or_strict'), to_text(err)
lines = out.splitlines()
if len(lines) != 2 or 'Module expanded into' not in lines[0]:
print("*" * 35)
print("INVALID OUTPUT FROM ANSIBALLZ MODULE WRAPPER")
print(out)
sys.exit(err)
debug_dir = lines[1].strip()
# All the directories in an AnsiBallZ that modules can live
core_dirs = glob.glob(os.path.join(debug_dir, 'ansible/modules'))
collection_dirs = glob.glob(os.path.join(debug_dir, 'ansible_collections/*/*/plugins/modules'))
# There's only one module in an AnsiBallZ payload so look for the first module and then exit
for module_dir in core_dirs + collection_dirs:
for dirname, directories, filenames in os.walk(module_dir):
for filename in filenames:
if filename == modname + '.py':
modfile = os.path.join(dirname, filename)
break
argsfile = os.path.join(debug_dir, 'args')
print("* ansiballz module detected; extracted module source to: %s" % debug_dir)
return modfile, argsfile
def runtest(modfile, argspath, modname, module_style, interpreters):
"""Test run a module, piping it's output for reporting."""
invoke = ""
if module_style == 'ansiballz':
modfile, argspath = ansiballz_setup(modfile, modname, interpreters)
if 'ansible_python_interpreter' in interpreters:
invoke = "%s " % interpreters['ansible_python_interpreter']
os.system("chmod +x %s" % modfile)
invoke = "%s%s" % (invoke, modfile)
if argspath is not None:
invoke = "%s %s" % (invoke, argspath)
cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
out, err = to_text(out), to_text(err)
try:
print("*" * 35)
print("RAW OUTPUT")
print(out)
print(err)
results = json.loads(out)
except Exception:
print("*" * 35)
print("INVALID OUTPUT FORMAT")
print(out)
traceback.print_exc()
sys.exit(1)
print("*" * 35)
print("PARSED OUTPUT")
print(jsonify(results, format=True))
def rundebug(debugger, modfile, argspath, modname, module_style, interpreters):
"""Run interactively with console debugger."""
if module_style == 'ansiballz':
modfile, argspath = ansiballz_setup(modfile, modname, interpreters)
if argspath is not None:
subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True)
else:
subprocess.call("%s %s" % (debugger, modfile), shell=True)
def main():
options, args = parse()
init_plugin_loader()
interpreters = get_interpreters(options.interpreter)
(modfile, modname, module_style) = boilerplate_module(options.module_path, options.module_args, interpreters, options.check, options.filename)
argspath = None
if module_style not in ('new', 'ansiballz'):
if module_style in ('non_native_want_json', 'binary'):
argspath = write_argsfile(options.module_args, json=True)
elif module_style == 'old':
argspath = write_argsfile(options.module_args, json=False)
else:
raise Exception("internal error, unexpected module style: %s" % module_style)
if options.execute:
if options.debugger:
rundebug(options.debugger, modfile, argspath, modname, module_style, interpreters)
else:
runtest(modfile, argspath, modname, module_style, interpreters)
if __name__ == "__main__":
try:
main()
finally:
shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
| 10,811 | Python | .py | 235 | 39.26383 | 146 | 0.668695 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,489 | create-bulk-issues.py | ansible_ansible/hacking/create-bulk-issues.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Create GitHub issues for deprecated features."""
from __future__ import annotations
import abc
import argparse
import dataclasses
import os
import pathlib
import re
import subprocess
import sys
import typing as t
import yaml
try:
# noinspection PyPackageRequirements
import argcomplete
except ImportError:
argcomplete = None
from ansible.release import __version__
MAJOR_MINOR_VERSION = '.'.join(__version__.split('.')[:2])
PROJECT = f'ansible-core {MAJOR_MINOR_VERSION}'
@dataclasses.dataclass(frozen=True)
class Issue:
title: str
summary: str
body: str
project: str
labels: list[str] | None = None
def create(self) -> str:
cmd = ['gh', 'issue', 'create', '--title', self.title, '--body', self.body, '--project', self.project]
if self.labels:
for label in self.labels:
cmd.extend(('--label', label))
process = subprocess.run(cmd, capture_output=True, check=True)
url = process.stdout.decode().strip()
return url
@dataclasses.dataclass(frozen=True)
class Feature:
title: str
summary: str
component: str
labels: list[str] | None = None
@staticmethod
def from_dict(data: dict[str, t.Any]) -> Feature:
title = data.get('title')
summary = data.get('summary')
component = data.get('component')
labels = data.get('labels')
if not isinstance(title, str):
raise RuntimeError(f'`title` is not `str`: {title}')
if not isinstance(summary, str):
raise RuntimeError(f'`summary` is not `str`: {summary}')
if not isinstance(component, str):
raise RuntimeError(f'`component` is not `str`: {component}')
if not isinstance(labels, list) or not all(isinstance(item, str) for item in labels):
raise RuntimeError(f'`labels` is not `list[str]`: {labels}')
return Feature(
title=title,
summary=summary,
component=component,
labels=labels,
)
def create_issue(self, project: str) -> Issue:
body = f'''
### Summary
{self.summary}
### Issue Type
Feature Idea
### Component Name
`{self.component}`
'''
return Issue(
title=self.title,
summary=self.summary,
body=body.strip(),
project=project,
labels=self.labels,
)
@dataclasses.dataclass(frozen=True)
class BugReport:
title: str
summary: str
component: str
labels: list[str] | None = None
def create_issue(self, project: str) -> Issue:
body = f'''
### Summary
{self.summary}
### Issue Type
Bug Report
### Component Name
`{self.component}`
### Ansible Version
{MAJOR_MINOR_VERSION}
### Configuration
N/A
### OS / Environment
N/A
### Steps to Reproduce
N/A
### Expected Results
N/A
### Actual Results
N/A
'''
return Issue(
title=self.title,
summary=self.summary,
body=body.strip(),
project=project,
labels=self.labels,
)
@dataclasses.dataclass(frozen=True)
class Deprecation(metaclass=abc.ABCMeta):
@staticmethod
@abc.abstractmethod
def parse(message: str) -> Deprecation:
pass
@abc.abstractmethod
def create_bug_report(self) -> BugReport:
pass
@dataclasses.dataclass(frozen=True)
class DeprecatedConfig(Deprecation):
path: str
config: str
version: str
@staticmethod
def parse(message: str) -> DeprecatedConfig:
match = re.search('^(?P<path>.*):[0-9]+:[0-9]+: (?P<config>.*) is scheduled for removal in (?P<version>[0-9.]+)$', message)
if not match:
raise Exception(f'Unable to parse: {message}')
return DeprecatedConfig(
path=match.group('path'),
config=match.group('config'),
version=match.group('version'),
)
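    # Example message the pattern above accepts (hypothetical values):
    #
    #   'lib/ansible/config/base.yml:12:3: DEFAULT_FOO is scheduled for removal in 2.20'
    #
    # which parses into path, config name and removal version.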
def create_bug_report(self) -> BugReport:
return BugReport(
title=f'Remove deprecated {self.config}',
summary=f'The config option `{self.config}` should be removed from `{self.path}`. It was scheduled for removal in {self.version}.',
component=self.path,
)
@dataclasses.dataclass(frozen=True)
class UpdateBundled(Deprecation):
path: str
package: str
old_version: str
new_version: str
json_link: str
link: str = dataclasses.field(default='', init=False)
def __post_init__(self) -> None:
object.__setattr__(self, 'link', re.sub('/json$', '', self.json_link))
@staticmethod
def parse(message: str) -> UpdateBundled:
match = re.search('^(?P<path>.*):[0-9]+:[0-9]+: UPDATE (?P<package>.*) from (?P<old>[0-9.]+) to (?P<new>[0-9.]+) (?P<link>https://.*)$', message)
if not match:
raise Exception(f'Unable to parse: {message}')
return UpdateBundled(
path=match.group('path'),
package=match.group('package'),
old_version=match.group('old'),
new_version=match.group('new'),
json_link=match.group('link'),
)
def create_bug_report(self) -> BugReport:
return BugReport(
title=f'Update bundled {self.package} to {self.new_version}',
summary=f'Update the bundled package [{self.package}]({self.link}) from `{self.old_version}` to `{self.new_version}`.',
component=self.path,
)
TEST_OPTIONS = {
'update-bundled': UpdateBundled,
'deprecated-config': DeprecatedConfig,
}
@dataclasses.dataclass(frozen=True)
class Args:
create: bool
verbose: bool
def run(self) -> None:
raise NotImplementedError()
@dataclasses.dataclass(frozen=True)
class DeprecationArgs(Args):
tests: list[str]
def run(self) -> None:
deprecated_command(self)
@dataclasses.dataclass(frozen=True)
class FeatureArgs(Args):
source: pathlib.Path
def run(self) -> None:
feature_command(self)
def parse_args() -> Args:
parser = argparse.ArgumentParser()
create_common_arguments(parser)
subparser = parser.add_subparsers(required=True)
create_deprecation_parser(subparser)
create_feature_parser(subparser)
args = invoke_parser(parser)
return args
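# Illustrative invocations (the script name below is a placeholder for this tool):
#   create-bulk-issues.py deprecation --test deprecated-config -v
#   create-bulk-issues.py feature --source issues.yml --create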
def create_deprecation_parser(subparser) -> None:
parser: argparse.ArgumentParser = subparser.add_parser('deprecation')
parser.set_defaults(type=DeprecationArgs)
parser.set_defaults(command=deprecated_command)
parser.add_argument(
'--test',
dest='tests',
choices=tuple(TEST_OPTIONS),
action='append',
help='sanity test name',
)
create_common_arguments(parser)
def create_feature_parser(subparser) -> None:
parser: argparse.ArgumentParser = subparser.add_parser('feature')
parser.set_defaults(type=FeatureArgs)
parser.set_defaults(command=feature_command)
parser.add_argument(
'--source',
type=pathlib.Path,
default=pathlib.Path('issues.yml'),
help='YAML file containing issue details (default: %(default)s)',
)
create_common_arguments(parser)
def create_common_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'--create',
action='store_true',
help='create issues on GitHub',
)
parser.add_argument(
'-v',
dest='verbose',
action='store_true',
help='verbose output',
)
def invoke_parser(parser: argparse.ArgumentParser) -> Args:
if argcomplete:
argcomplete.autocomplete(parser)
parsed_args = parser.parse_args()
kvp = {}
args_type = parsed_args.type
for field in dataclasses.fields(args_type):
kvp[field.name] = getattr(parsed_args, field.name)
args = args_type(**kvp)
return args
def run_sanity_test(test_name: str) -> list[str]:
cmd = ['ansible-test', 'sanity', '--test', test_name, '--lint', '--failure-ok']
skip_path = 'test/sanity/code-smell/skip.txt'
skip_temp_path = skip_path + '.tmp'
os.rename(skip_path, skip_temp_path) # make sure ansible-test isn't configured to skip any tests
try:
process = subprocess.run(cmd, capture_output=True, check=True)
finally:
os.rename(skip_temp_path, skip_path) # restore the skip entries
messages = process.stdout.decode().splitlines()
return messages
def create_issues_from_deprecation_messages(test_type: t.Type[Deprecation], messages: list[str]) -> list[Issue]:
deprecations = [test_type.parse(message) for message in messages]
bug_reports = [deprecation.create_bug_report() for deprecation in deprecations]
issues = [bug_report.create_issue(PROJECT) for bug_report in bug_reports]
return issues
def info(message: str) -> None:
print(message, file=sys.stderr)
def main() -> None:
args = parse_args()
args.run()
def deprecated_command(args: DeprecationArgs) -> None:
issues: list[Issue] = []
for test in args.tests or list(TEST_OPTIONS):
test_type = TEST_OPTIONS[test]
info(f'Running "{test}" sanity test...')
messages = run_sanity_test(test)
issues.extend(create_issues_from_deprecation_messages(test_type, messages))
create_issues(args, issues)
def feature_command(args: FeatureArgs) -> None:
with args.source.open() as source_file:
source = yaml.safe_load(source_file)
default: dict[str, t.Any] = source.get('default', {})
features: list[dict[str, t.Any]] = source.get('features', [])
if not isinstance(default, dict):
raise RuntimeError('`default` must be `dict[str, ...]`')
if not isinstance(features, list):
raise RuntimeError('`features` must be `list[dict[str, ...]]`')
issues: list[Issue] = []
for feature in features:
data = default.copy()
data.update(feature)
feature = Feature.from_dict(data)
issues.append(feature.create_issue(PROJECT))
create_issues(args, issues)
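# A minimal sketch of the `issues.yml` layout consumed above (all values illustrative):
#   default:
#     labels:
#       - feature
#   features:
#     - title: Add widget support
#       summary: Support widgets in the frobnicator.
#       component: lib/ansible/frobnicator.py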
def create_issues(args: Args, issues: list[Issue]) -> None:
if not issues:
info('No issues found.')
return
info(f'Found {len(issues)} issue(s) to report:')
for issue in issues:
info(f'[{issue.title}] {issue.summary}')
if args.verbose:
info('>>>')
info(issue.body)
info('>>>')
if args.create:
url = issue.create()
info(url)
if not args.create:
info('Pass the "--create" option to create these issues on GitHub.')
if __name__ == '__main__':
main()
| 10,646 | Python | .py | 307 | 28.188925 | 153 | 0.643641 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,490 | report.py | ansible_ansible/hacking/report.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""A tool to aggregate data about Ansible source and testing into a sqlite DB for reporting."""
from __future__ import annotations
import argparse
import json
import os
import sqlite3
import sys
DATABASE_PATH = os.path.expanduser('~/.ansible/report.db')
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) + '/'
ANSIBLE_PATH = os.path.join(BASE_PATH, 'lib')
ANSIBLE_TEST_PATH = os.path.join(BASE_PATH, 'test/lib')
if ANSIBLE_PATH not in sys.path:
sys.path.insert(0, ANSIBLE_PATH)
if ANSIBLE_TEST_PATH not in sys.path:
sys.path.insert(0, ANSIBLE_TEST_PATH)
from ansible.module_utils.urls import open_url
from ansible.parsing.plugin_docs import read_docstring
from ansible_test._internal.target import walk_integration_targets
def main():
os.chdir(BASE_PATH)
args = parse_args()
args.func()
def parse_args():
try:
import argcomplete
except ImportError:
argcomplete = None
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True # work-around for python 3 bug which makes subparsers optional
populate = subparsers.add_parser('populate',
help='populate report database')
populate.set_defaults(func=populate_database)
query = subparsers.add_parser('query',
help='query report database')
query.set_defaults(func=query_database)
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
return args
def query_database():
if not os.path.exists(DATABASE_PATH):
sys.exit('error: Database not found. Did you run `report.py populate` first?')
os.execvp('sqlite3', ('sqlite3', DATABASE_PATH))
def populate_database():
populate_modules()
populate_coverage()
populate_integration_targets()
def populate_modules():
module_dir = os.path.join(BASE_PATH, 'lib/ansible/modules/')
modules_rows = []
for root, dir_names, file_names in os.walk(module_dir):
for file_name in file_names:
module, extension = os.path.splitext(file_name)
if module == '__init__' or extension != '.py':
continue
if module.startswith('_'):
module = module[1:]
            namespace = root.replace(module_dir, '').replace('/', '.')
path = os.path.join(root, file_name)
result = read_docstring(path)
doc = result['doc']
modules_rows.append(dict(
module=module,
namespace=namespace,
path=path.replace(BASE_PATH, ''),
version_added=str(doc.get('version_added', '')) if doc else '',
))
populate_data(dict(
modules=dict(
rows=modules_rows,
schema=(
('module', 'TEXT'),
('namespace', 'TEXT'),
('path', 'TEXT'),
('version_added', 'TEXT'),
)),
))
def populate_coverage():
response = open_url('https://codecov.io/api/gh/ansible/ansible/tree/devel/?src=extension')
data = json.load(response)
files = data['commit']['report']['files']
coverage_rows = []
for path, data in files.items():
report = data['t']
coverage_rows.append(dict(
path=path,
coverage=float(report['c']),
lines=report['n'],
hit=report['h'],
partial=report['p'],
missed=report['m'],
))
populate_data(dict(
coverage=dict(
rows=coverage_rows,
schema=(
('path', 'TEXT'),
('coverage', 'REAL'),
('lines', 'INTEGER'),
('hit', 'INTEGER'),
('partial', 'INTEGER'),
('missed', 'INTEGER'),
)),
))
def populate_integration_targets():
targets = list(walk_integration_targets())
integration_targets_rows = [dict(
target=target.name,
type=target.type,
path=target.path,
script_path=target.script_path,
) for target in targets]
integration_target_aliases_rows = [dict(
target=target.name,
alias=alias,
) for target in targets for alias in target.aliases]
integration_target_modules_rows = [dict(
target=target.name,
module=module,
) for target in targets for module in target.modules]
populate_data(dict(
integration_targets=dict(
rows=integration_targets_rows,
schema=(
('target', 'TEXT'),
('type', 'TEXT'),
('path', 'TEXT'),
('script_path', 'TEXT'),
)),
integration_target_aliases=dict(
rows=integration_target_aliases_rows,
schema=(
('target', 'TEXT'),
('alias', 'TEXT'),
)),
integration_target_modules=dict(
rows=integration_target_modules_rows,
schema=(
('target', 'TEXT'),
('module', 'TEXT'),
)),
))
def create_table(cursor, name, columns):
schema = ', '.join('%s %s' % column for column in columns)
cursor.execute('DROP TABLE IF EXISTS %s' % name)
cursor.execute('CREATE TABLE %s (%s)' % (name, schema))
def populate_table(cursor, rows, name, columns):
create_table(cursor, name, columns)
values = ', '.join([':%s' % column[0] for column in columns])
for row in rows:
cursor.execute('INSERT INTO %s VALUES (%s)' % (name, values), row)
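# For the `modules` table above, these helpers emit SQL along the lines of:
#   CREATE TABLE modules (module TEXT, namespace TEXT, path TEXT, version_added TEXT)
#   INSERT INTO modules VALUES (:module, :namespace, :path, :version_added)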
def populate_data(data):
connection = sqlite3.connect(DATABASE_PATH)
cursor = connection.cursor()
for table in data:
populate_table(cursor, data[table]['rows'], table, data[table]['schema'])
connection.commit()
connection.close()
if __name__ == '__main__':
main()
| 6,046 | Python | .py | 163 | 28.239264 | 97 | 0.588973 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,491 | update-sanity-requirements.py | ansible_ansible/hacking/update-sanity-requirements.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Generate frozen sanity test requirements from source requirements files."""
from __future__ import annotations
import argparse
import dataclasses
import pathlib
import re
import subprocess
import tempfile
import typing as t
import venv
import packaging.version
import packaging.specifiers
import packaging.requirements
try:
import argcomplete
except ImportError:
argcomplete = None
FILE = pathlib.Path(__file__).resolve()
ROOT = FILE.parent.parent
SELF = FILE.relative_to(ROOT)
@dataclasses.dataclass(frozen=True)
class SanityTest:
name: str
requirements_path: pathlib.Path
source_path: pathlib.Path
def freeze_requirements(self) -> None:
source_requirements = [packaging.requirements.Requirement(re.sub(' #.*$', '', line)) for line in self.source_path.read_text().splitlines()]
install_packages = {requirement.name for requirement in source_requirements}
exclude_packages = {'distribute', 'pip', 'setuptools', 'wheel'} - install_packages
with tempfile.TemporaryDirectory() as venv_dir:
venv.create(venv_dir, with_pip=True)
python = pathlib.Path(venv_dir, 'bin', 'python')
pip = [python, '-m', 'pip', '--disable-pip-version-check']
env = dict()
pip_freeze = subprocess.run(pip + ['freeze'], env=env, check=True, capture_output=True, text=True)
if pip_freeze.stdout:
raise Exception(f'Initial virtual environment is not empty:\n{pip_freeze.stdout}')
subprocess.run(pip + ['install', '-r', self.source_path], env=env, check=True)
freeze_options = ['--all']
for exclude_package in exclude_packages:
freeze_options.extend(('--exclude', exclude_package))
pip_freeze = subprocess.run(pip + ['freeze'] + freeze_options, env=env, check=True, capture_output=True, text=True)
self.write_requirements(pip_freeze.stdout)
def update_pre_build(self) -> None:
"""Update requirements in place with current pre-build instructions."""
requirements = pathlib.Path(self.requirements_path).read_text()
lines = requirements.splitlines(keepends=True)
lines = [line for line in lines if not line.startswith('#')]
requirements = ''.join(lines)
self.write_requirements(requirements)
def write_requirements(self, requirements: str) -> None:
"""Write the given test requirements to the requirements file for this test."""
pre_build = pre_build_instructions(requirements)
requirements = f'# edit "{self.source_path.name}" and generate with: {SELF} --test {self.name}\n{pre_build}{requirements}'
with open(self.requirements_path, 'w') as requirement_file:
requirement_file.write(requirements)
@staticmethod
def create(path: pathlib.Path) -> SanityTest:
return SanityTest(
name=path.stem.replace('sanity.', '').replace('.requirements', ''),
requirements_path=path,
source_path=path.with_suffix('.in'),
)
def pre_build_instructions(requirements: str) -> str:
"""Parse the given requirements and return any applicable pre-build instructions."""
parsed_requirements = requirements.splitlines()
package_versions = {
match.group('package').lower(): match.group('version') for match
in (re.search('^(?P<package>.*)==(?P<version>.*)$', requirement) for requirement in parsed_requirements)
if match
}
instructions: list[str] = []
build_constraints = (
('pyyaml', '>= 5.4, <= 6.0', ('Cython < 3.0',)),
)
for package, specifier, constraints in build_constraints:
version_string = package_versions.get(package)
if version_string:
version = packaging.version.Version(version_string)
specifier_set = packaging.specifiers.SpecifierSet(specifier)
if specifier_set.contains(version):
instructions.append(f'# pre-build requirement: {package} == {version}\n')
for constraint in constraints:
instructions.append(f'# pre-build constraint: {constraint}\n')
return ''.join(instructions)
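# For example, a frozen requirement of `PyYAML==6.0` falls inside the constraint
# declared above, so the returned instructions would be roughly:
#   # pre-build requirement: pyyaml == 6.0
#   # pre-build constraint: Cython < 3.0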
def main() -> None:
tests = find_tests()
parser = argparse.ArgumentParser()
parser.add_argument(
'--test',
metavar='TEST',
dest='test_names',
action='append',
choices=[test.name for test in tests],
help='test requirements to update'
)
parser.add_argument(
'--pre-build-only',
action='store_true',
help='apply pre-build instructions to existing requirements',
)
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
test_names: set[str] = set(args.test_names or [])
tests = [test for test in tests if test.name in test_names] if test_names else tests
for test in tests:
print(f'===[ {test.name} ]===', flush=True)
if args.pre_build_only:
test.update_pre_build()
else:
test.freeze_requirements()
def find_tests() -> t.List[SanityTest]:
globs = (
'test/lib/ansible_test/_data/requirements/sanity.*.txt',
'test/sanity/code-smell/*.requirements.txt',
)
tests: t.List[SanityTest] = []
for glob in globs:
tests.extend(get_tests(pathlib.Path(glob)))
return sorted(tests, key=lambda test: test.name)
def get_tests(glob: pathlib.Path) -> t.List[SanityTest]:
path = pathlib.Path(ROOT, glob.parent)
pattern = glob.name
return [SanityTest.create(item) for item in path.glob(pattern)]
if __name__ == '__main__':
main()
| 5,744 | Python | .py | 129 | 37.062016 | 147 | 0.658335 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,492 | ansible-profile.py | ansible_ansible/hacking/ansible-profile.py |
#!/usr/bin/env python
from __future__ import annotations
import cProfile
import sys
import traceback
from ansible.module_utils.common.text.converters import to_text
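# Example: profile `ansible-playbook` by naming the sub-command first, e.g.
#   hacking/ansible-profile.py playbook site.yml
# which imports ansible.cli.playbook, instantiates PlaybookCLI and profiles its run()
# (site.yml is a placeholder playbook).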
target = sys.argv.pop(1)
myclass = "%sCLI" % target.capitalize()
module_name = f'ansible.cli.{target}'
try:
# define cli
mycli = getattr(__import__(module_name, fromlist=[myclass]), myclass)
except ImportError as e:
if module_name in e.msg:
raise Exception("Ansible sub-program not implemented: %s" % target) from None
else:
raise
try:
args = [to_text(a, errors='surrogate_or_strict') for a in sys.argv]
except UnicodeError:
sys.stderr.write(u"The full traceback was:\n\n%s" % to_text(traceback.format_exc()))
    sys.exit(u'Command line args are not parsable to utf-8')
# init cli
cli = mycli(args)
print(cli.__class__.version_info(gitinfo=True))
# parse args
cli.parse()
# run
cProfile.run('cli.run()')
| 915 | Python | .py | 29 | 28.724138 | 88 | 0.722919 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,493 | backport_of_line_adder.py | ansible_ansible/hacking/backport/backport_of_line_adder.py |
#!/usr/bin/env python
# (c) 2020, Red Hat, Inc. <relrod@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from github.PullRequest import PullRequest
from github import Github
import os
import re
import sys
PULL_URL_RE = re.compile(r'(?P<user>\S+)/(?P<repo>\S+)#(?P<ticket>\d+)')
PULL_HTTP_URL_RE = re.compile(r'https?://(?:www\.|)github.com/(?P<user>\S+)/(?P<repo>\S+)/pull/(?P<ticket>\d+)')
PULL_BACKPORT_IN_TITLE = re.compile(r'.*\(#?(?P<ticket1>\d+)\)|\(backport of #?(?P<ticket2>\d+)\).*', re.I)
PULL_CHERRY_PICKED_FROM = re.compile(r'\(?cherry(?:\-| )picked from(?: ?commit|) (?P<hash>\w+)(?:\)|\.|$)')
TICKET_NUMBER = re.compile(r'(?:^|\s)#(\d+)')
def normalize_pr_url(pr, allow_non_ansible_ansible=False, only_number=False):
"""
Given a PullRequest, or a string containing a PR number, PR URL,
or internal PR URL (e.g. ansible-collections/community.general#1234),
return either a full github URL to the PR (if only_number is False),
or an int containing the PR number (if only_number is True).
Throws if it can't parse the input.
"""
if isinstance(pr, PullRequest):
return pr.html_url
if pr.isnumeric():
if only_number:
return int(pr)
return 'https://github.com/ansible/ansible/pull/{0}'.format(pr)
# Allow for forcing ansible/ansible
if not allow_non_ansible_ansible and 'ansible/ansible' not in pr:
raise Exception('Non ansible/ansible repo given where not expected')
re_match = PULL_HTTP_URL_RE.match(pr)
if re_match:
if only_number:
return int(re_match.group('ticket'))
return pr
re_match = PULL_URL_RE.match(pr)
if re_match:
if only_number:
return int(re_match.group('ticket'))
return 'https://github.com/{0}/{1}/pull/{2}'.format(
re_match.group('user'),
re_match.group('repo'),
re_match.group('ticket'))
raise Exception('Did not understand given PR')
def url_to_org_repo(url):
"""
Given a full Github PR URL, extract the user/org and repo name.
Return them in the form: "user/repo"
"""
match = PULL_HTTP_URL_RE.match(url)
if not match:
return ''
return '{0}/{1}'.format(match.group('user'), match.group('repo'))
def generate_new_body(pr, source_pr):
"""
Given the new PR (the backport) and the originating (source) PR,
construct the new body for the backport PR.
If the backport follows the usual ansible/ansible template, we look for the
'##### SUMMARY'-type line and add our "Backport of" line right below that.
If we can't find the SUMMARY line, we add our line at the very bottom.
This function does not side-effect, it simply returns the new body as a
string.
"""
backport_text = '\nBackport of {0}\n'.format(source_pr)
body_lines = pr.body.split('\n')
new_body_lines = []
added = False
for line in body_lines:
if 'Backport of http' in line:
raise Exception('Already has a backport line, aborting.')
new_body_lines.append(line)
if line.startswith('#') and line.strip().endswith('SUMMARY'):
# This would be a fine place to add it
new_body_lines.append(backport_text)
added = True
if not added:
# Otherwise, no '#### SUMMARY' line, so just add it at the bottom
new_body_lines.append(backport_text)
return '\n'.join(new_body_lines)
def get_prs_for_commit(g, commit):
"""
Given a commit hash, attempt to find the hash in any repo in the
ansible orgs, and then use it to determine what, if any, PR it appeared in.
"""
commits = g.search_commits(
'hash:{0} org:ansible org:ansible-collections is:public'.format(commit)
).get_page(0)
if not commits or len(commits) == 0:
return []
pulls = commits[0].get_pulls().get_page(0)
if not pulls or len(pulls) == 0:
return []
return pulls
def search_backport(pr, g, ansible_ansible):
"""
Do magic. This is basically the "brain" of 'auto'.
It will search the PR (the newest PR - the backport) and try to find where
it originated.
First it will search in the title. Some titles include things like
"foo bar change (#12345)" or "foo bar change (backport of #54321)"
so we search for those and pull them out.
Next it will scan the body of the PR and look for:
- cherry-pick reference lines (e.g. "cherry-picked from commit XXXXX")
- other PRs (#nnnnnn) and (foo/bar#nnnnnnn)
- full URLs to other PRs
It will take all of the above, and return a list of "possibilities",
which is a list of PullRequest objects.
"""
possibilities = []
# 1. Try searching for it in the title.
title_search = PULL_BACKPORT_IN_TITLE.match(pr.title)
if title_search:
ticket = title_search.group('ticket1')
if not ticket:
ticket = title_search.group('ticket2')
try:
possibilities.append(ansible_ansible.get_pull(int(ticket)))
except Exception:
pass
# 2. Search for clues in the body of the PR
body_lines = pr.body.split('\n')
for line in body_lines:
# a. Try searching for a `git cherry-pick` line
cherrypick = PULL_CHERRY_PICKED_FROM.match(line)
if cherrypick:
prs = get_prs_for_commit(g, cherrypick.group('hash'))
possibilities.extend(prs)
continue
# b. Try searching for other referenced PRs (by #nnnnn or full URL)
tickets = [('ansible', 'ansible', ticket) for ticket in TICKET_NUMBER.findall(line)]
tickets.extend(PULL_HTTP_URL_RE.findall(line))
tickets.extend(PULL_URL_RE.findall(line))
if tickets:
for ticket in tickets:
# Is it a PR (even if not in ansible/ansible)?
# TODO: As a small optimization/to avoid extra calls to GitHub,
# we could limit this check to non-URL matches. If it's a URL,
# we know it's definitely a pull request.
try:
repo_path = '{0}/{1}'.format(ticket[0], ticket[1])
repo = ansible_ansible
if repo_path != 'ansible/ansible':
repo = g.get_repo(repo_path)
                    ticket_pr = repo.get_pull(int(ticket[2]))  # ticket is a (user, repo, number) tuple
possibilities.append(ticket_pr)
except Exception:
pass
continue # Future-proofing
return possibilities
def prompt_add():
"""
Prompt the user and return whether or not they agree.
"""
res = input('Shall I add the reference? [Y/n]: ')
return res.lower() in ('', 'y', 'yes')
def commit_edit(new_pr, pr):
"""
Given the new PR (the backport), and the "possibility" that we have decided
on, prompt the user and then add the reference to the body of the new PR.
This method does the actual "destructive" work of editing the PR body.
"""
print('I think this PR might have come from:')
print(pr.title)
print('-' * 50)
print(pr.html_url)
if prompt_add():
new_body = generate_new_body(new_pr, pr.html_url)
new_pr.edit(body=new_body)
print('I probably added the reference successfully.')
if __name__ == '__main__':
if (
len(sys.argv) != 3 or
not sys.argv[1].isnumeric()
):
print('Usage: <new backport PR> <already merged PR, or "auto">')
sys.exit(1)
token = os.environ.get('GITHUB_TOKEN')
if not token:
print('Go to https://github.com/settings/tokens/new and generate a '
'token with "repo" access, then set GITHUB_TOKEN to that token.')
sys.exit(1)
# https://github.com/settings/tokens/new
g = Github(token)
ansible_ansible = g.get_repo('ansible/ansible')
try:
pr_num = normalize_pr_url(sys.argv[1], only_number=True)
new_pr = ansible_ansible.get_pull(pr_num)
except Exception:
print('Could not load PR {0}'.format(sys.argv[1]))
sys.exit(1)
if sys.argv[2] == 'auto':
print('Trying to find originating PR...')
possibilities = search_backport(new_pr, g, ansible_ansible)
if not possibilities:
print('No match found, manual review required.')
sys.exit(1)
# TODO: Logic above can return multiple possibilities/guesses, but we
# only handle one here. We can cycle/prompt through them or something.
# For now, use the first match, which is also the most likely
# candidate.
pr = possibilities[0]
commit_edit(new_pr, pr)
else:
try:
# TODO: Fix having to call this twice to save some regex evals
pr_num = normalize_pr_url(sys.argv[2], only_number=True, allow_non_ansible_ansible=True)
pr_url = normalize_pr_url(sys.argv[2], allow_non_ansible_ansible=True)
pr_repo = g.get_repo(url_to_org_repo(pr_url))
pr = pr_repo.get_pull(pr_num)
except Exception as e:
print(e)
print('Could not load PR {0}'.format(sys.argv[2]))
sys.exit(1)
commit_edit(new_pr, pr)
| 9,893 | Python | .py | 229 | 35.825328 | 112 | 0.631913 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,494 | gen_distribution_version_testcase.py | ansible_ansible/hacking/tests/gen_distribution_version_testcase.py |
#!/usr/bin/env python
"""
This script generates test cases for test_distribution_version.py.
To do so it outputs the relevant files from /etc/*release, the output of distro.linux_distribution()
and the current ansible_facts regarding the distribution version.
This assumes a working ansible version in the path.
"""
from __future__ import annotations
import json
import os.path
import platform
import subprocess
import sys
from ansible.module_utils import distro
from ansible.module_utils.common.text.converters import to_text
filelist = [
'/etc/oracle-release',
'/etc/slackware-version',
'/etc/centos-release',
'/etc/redhat-release',
'/etc/vmware-release',
'/etc/openwrt_release',
'/etc/system-release',
'/etc/alpine-release',
'/etc/release',
'/etc/arch-release',
'/etc/os-release',
'/etc/SuSE-release',
'/etc/gentoo-release',
    '/etc/lsb-release',
    '/etc/altlinux-release',
'/etc/coreos/update.conf',
'/usr/lib/os-release',
]
fcont = {}
for f in filelist:
if os.path.exists(f):
s = os.path.getsize(f)
if s > 0 and s < 10000:
with open(f) as fh:
fcont[f] = fh.read()
dist = (distro.id(), distro.version(), distro.codename())
facts = ['distribution', 'distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']
try:
b_ansible_out = subprocess.check_output(
['ansible', 'localhost', '-m', 'setup'])
except subprocess.CalledProcessError as e:
print("ERROR: ansible run failed, output was: \n")
print(e.output)
sys.exit(e.returncode)
ansible_out = to_text(b_ansible_out)
parsed = json.loads(ansible_out[ansible_out.index('{'):])
ansible_facts = {}
for fact in facts:
try:
ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
except Exception:
ansible_facts[fact] = "N/A"
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
output = {
'name': nicename,
'distro': {
'codename': distro.codename(),
'id': distro.id(),
'name': distro.name(),
'version': distro.version(),
'version_best': distro.version(best=True),
'lsb_release_info': distro.lsb_release_info(),
'os_release_info': distro.os_release_info(),
},
'input': fcont,
'platform.dist': dist,
'result': ansible_facts,
}
system = platform.system()
if system != 'Linux':
output['platform.system'] = system
release = platform.release()
if release:
output['platform.release'] = release
print(json.dumps(output, indent=4))
| 2,657 | Python | .py | 83 | 27.650602 | 115 | 0.66771 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,495 | get_recent_coverage_runs.py | ansible_ansible/hacking/azp/get_recent_coverage_runs.py |
#!/usr/bin/env python
# (c) 2020 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.utils.color import stringc
import requests
import sys
import datetime
# Following changes should be made to improve the overall style:
# TODO use argparse for arguments.
# TODO use new style formatting method.
# TODO use requests session.
# TODO type hints.
BRANCH = 'devel'
PIPELINE_ID = 20
MAX_AGE = datetime.timedelta(hours=24)
if len(sys.argv) > 1:
BRANCH = sys.argv[1]
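# Example: report on a non-default branch by passing it as the only argument
# (the branch name below is illustrative):
#   hacking/azp/get_recent_coverage_runs.py stable-2.17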
def get_coverage_runs():
list_response = requests.get("https://dev.azure.com/ansible/ansible/_apis/pipelines/%s/runs?api-version=6.0-preview.1" % PIPELINE_ID)
list_response.raise_for_status()
runs = list_response.json()
coverage_runs = []
for run_summary in runs["value"][0:1000]:
run_response = requests.get(run_summary['url'])
if run_response.status_code == 500 and 'Cannot serialize type Microsoft.Azure.Pipelines.WebApi.ContainerResource' in run_response.json()['message']:
# This run used a container resource, which AZP can no longer serialize for anonymous requests.
# Assume all older requests have this issue as well and stop further processing of runs.
# The issue was reported here: https://developercommunity.visualstudio.com/t/Pipelines-API-serialization-error-for-an/10294532
# A work-around for this issue was applied in: https://github.com/ansible/ansible/pull/80299
break
run_response.raise_for_status()
run = run_response.json()
if run['resources']['repositories']['self']['refName'] != 'refs/heads/%s' % BRANCH:
continue
        if 'finishedDate' in run:
            age = datetime.datetime.now() - datetime.datetime.strptime(run['finishedDate'].split(".")[0], "%Y-%m-%dT%H:%M:%S")
if age > MAX_AGE:
break
artifact_response = requests.get("https://dev.azure.com/ansible/ansible/_apis/build/builds/%s/artifacts?api-version=6.0" % run['id'])
artifact_response.raise_for_status()
artifacts = artifact_response.json()['value']
if any(a["name"].startswith("Coverage") for a in artifacts):
# TODO wrongfully skipped if all jobs failed.
coverage_runs.append(run)
return coverage_runs
def pretty_coverage_runs(runs):
ended = []
in_progress = []
for run in runs:
if run.get('finishedDate'):
ended.append(run)
else:
in_progress.append(run)
for run in sorted(ended, key=lambda x: x['finishedDate']):
if run['result'] == "succeeded":
print('🙂 [%s] https://dev.azure.com/ansible/ansible/_build/results?buildId=%s (%s)' % (
stringc('PASS', 'green'),
run['id'],
run['finishedDate']))
else:
print('😢 [%s] https://dev.azure.com/ansible/ansible/_build/results?buildId=%s (%s)' % (
stringc('FAIL', 'red'),
run['id'],
run['finishedDate']))
if in_progress:
print('The following runs are ongoing:')
for run in in_progress:
print('🤔 [%s] https://dev.azure.com/ansible/ansible/_build/results?buildId=%s' % (
stringc('FATE', 'yellow'),
run['id']))
def main():
pretty_coverage_runs(get_coverage_runs())
if __name__ == '__main__':
main()
| 4,083 | Python | .py | 89 | 38.88764 | 156 | 0.654321 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,496 | download.py | ansible_ansible/hacking/azp/download.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""CLI tool for downloading results from Azure Pipelines CI runs."""
from __future__ import annotations
# noinspection PyCompatibility
import argparse
import json
import os
import re
import io
import zipfile
import requests
try:
import argcomplete
except ImportError:
argcomplete = None
# Following changes should be made to improve the overall style:
# TODO use new style formatting method.
# TODO use requests session.
# TODO type hints.
# TODO pathlib.
def main():
"""Main program body."""
args = parse_args()
download_run(args)
def run_id_arg(arg):
m = re.fullmatch(r"(?:https:\/\/dev\.azure\.com\/ansible\/ansible\/_build\/results\?buildId=)?(\d+)", arg)
if not m:
        raise ValueError("run does not seem to be a URL or an ID")
return m.group(1)
def parse_args():
"""Parse and return args."""
parser = argparse.ArgumentParser(description='Download results from a CI run.')
parser.add_argument('run', metavar='RUN', type=run_id_arg, help='AZP run id or URI')
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true',
help='show what is being downloaded')
parser.add_argument('-t', '--test',
dest='test',
action='store_true',
help='show what would be downloaded without downloading')
parser.add_argument('-p', '--pipeline-id', type=int, default=20, help='pipeline to download the job from')
parser.add_argument('--artifacts',
action='store_true',
help='download artifacts')
parser.add_argument('--console-logs',
action='store_true',
help='download console logs')
parser.add_argument('--run-metadata',
action='store_true',
help='download run metadata')
parser.add_argument('--all',
action='store_true',
help='download everything')
parser.add_argument('--match-artifact-name',
default=re.compile('.*'),
type=re.compile,
help='only download artifacts which names match this regex')
parser.add_argument('--match-job-name',
default=re.compile('.*'),
type=re.compile,
help='only download artifacts from jobs which names match this regex')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.all:
args.artifacts = True
args.run_metadata = True
args.console_logs = True
selections = (
args.artifacts,
args.run_metadata,
args.console_logs
)
if not any(selections):
parser.error('At least one download option is required.')
return args
def download_run(args):
"""Download a run."""
output_dir = '%s' % args.run
if not args.test and not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.run_metadata:
run_url = 'https://dev.azure.com/ansible/ansible/_apis/pipelines/%s/runs/%s?api-version=6.0-preview.1' % (args.pipeline_id, args.run)
run_info_response = requests.get(run_url)
run_info_response.raise_for_status()
run = run_info_response.json()
path = os.path.join(output_dir, 'run.json')
contents = json.dumps(run, sort_keys=True, indent=4)
if args.verbose:
print(path)
if not args.test:
with open(path, 'w') as metadata_fd:
metadata_fd.write(contents)
timeline_response = requests.get('https://dev.azure.com/ansible/ansible/_apis/build/builds/%s/timeline?api-version=6.0' % args.run)
timeline_response.raise_for_status()
timeline = timeline_response.json()
roots = set()
by_id = {}
children_of = {}
parent_of = {}
for r in timeline['records']:
thisId = r['id']
parentId = r['parentId']
by_id[thisId] = r
if parentId is None:
roots.add(thisId)
else:
parent_of[thisId] = parentId
children_of[parentId] = children_of.get(parentId, []) + [thisId]
allowed = set()
def allow_recursive(ei):
allowed.add(ei)
for ci in children_of.get(ei, []):
allow_recursive(ci)
for ri in roots:
r = by_id[ri]
allowed.add(ri)
for ci in children_of.get(r['id'], []):
c = by_id[ci]
if not args.match_job_name.match("%s %s" % (r['name'], c['name'])):
continue
allow_recursive(c['id'])
if args.artifacts:
artifact_list_url = 'https://dev.azure.com/ansible/ansible/_apis/build/builds/%s/artifacts?api-version=6.0' % args.run
artifact_list_response = requests.get(artifact_list_url)
artifact_list_response.raise_for_status()
for artifact in artifact_list_response.json()['value']:
if artifact['source'] not in allowed or not args.match_artifact_name.match(artifact['name']):
continue
if args.verbose:
print('%s/%s' % (output_dir, artifact['name']))
if not args.test:
response = requests.get(artifact['resource']['downloadUrl'])
response.raise_for_status()
archive = zipfile.ZipFile(io.BytesIO(response.content))
archive.extractall(path=output_dir)
if args.console_logs:
for r in timeline['records']:
if not r['log'] or r['id'] not in allowed or not args.match_artifact_name.match(r['name']):
continue
names = []
parent_id = r['id']
while parent_id is not None:
p = by_id[parent_id]
name = p['name']
if name not in names:
names = [name] + names
parent_id = parent_of.get(p['id'], None)
path = " ".join(names)
# Some job names have the separator in them.
path = path.replace(os.sep, '_')
log_path = os.path.join(output_dir, '%s.log' % path)
if args.verbose:
print(log_path)
if not args.test:
log = requests.get(r['log']['url'])
log.raise_for_status()
                with open(log_path, 'wb') as log_file:
                    log_file.write(log.content)
if __name__ == '__main__':
main()
| 7,300 | Python | .py | 178 | 31.410112 | 141 | 0.591006 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,497 | run.py | ansible_ansible/hacking/azp/run.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""CLI tool for starting new CI runs."""
from __future__ import annotations
# noinspection PyCompatibility
import argparse
import json
import os
import sys
import requests
import requests.auth
try:
import argcomplete
except ImportError:
argcomplete = None
# TODO: Dev does not have a token for AZP, somebody please test this.
# Following changes should be made to improve the overall style:
# TODO use new style formatting method.
# TODO type hints.
def main():
"""Main program body."""
args = parse_args()
key = os.environ.get('AZP_TOKEN', None)
if not key:
sys.stderr.write("please set you AZP token in AZP_TOKEN")
sys.exit(1)
start_run(args, key)
def parse_args():
"""Parse and return args."""
parser = argparse.ArgumentParser(description='Start a new CI run.')
parser.add_argument('-p', '--pipeline-id', type=int, default=20, help='pipeline to download the job from')
parser.add_argument('--ref', help='git ref name to run on')
parser.add_argument('--env',
nargs=2,
metavar=('KEY', 'VALUE'),
action='append',
help='environment variable to pass')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
return args
def start_run(args, key):
"""Start a new CI run."""
url = "https://dev.azure.com/ansible/ansible/_apis/pipelines/%s/runs?api-version=6.0-preview.1" % args.pipeline_id
payload = {"resources": {"repositories": {"self": {"refName": args.ref}}}}
    resp = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', key), json=payload)  # the API expects a JSON body, not form data
resp.raise_for_status()
print(json.dumps(resp.json(), indent=4, sort_keys=True))
if __name__ == '__main__':
main()
| 2,542 | Python | .py | 66 | 33.954545 | 118 | 0.690486 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,498 | incidental.py | ansible_ansible/hacking/azp/incidental.py |
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# (c) 2020 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""CLI tool for reporting on incidental test coverage."""
from __future__ import annotations
# noinspection PyCompatibility
import argparse
import glob
import json
import os
import re
import subprocess
import sys
import hashlib
try:
# noinspection PyPackageRequirements
import argcomplete
except ImportError:
argcomplete = None
# Following changes should be made to improve the overall style:
# TODO use new style formatting method.
# TODO type hints.
# TODO pathlib.
def main():
"""Main program body."""
args = parse_args()
try:
incidental_report(args)
except ApplicationError as ex:
sys.exit(ex)
def parse_args():
"""Parse and return args."""
source = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
parser = argparse.ArgumentParser(description='Report on incidental test coverage downloaded from Azure Pipelines.')
parser.add_argument('result',
type=directory,
help='path to directory containing test results downloaded from Azure Pipelines')
parser.add_argument('--output',
type=optional_directory,
default=os.path.join(source, 'test', 'results', '.tmp', 'incidental'),
help='path to directory where reports should be written')
parser.add_argument('--source',
type=optional_directory,
default=source,
help='path to git repository containing Ansible source')
parser.add_argument('--skip-checks',
action='store_true',
help='skip integrity checks, use only for debugging')
parser.add_argument('--ignore-cache',
dest='use_cache',
action='store_false',
help='ignore cached files')
parser.add_argument('-v', '--verbose',
action='store_true',
help='increase verbosity')
parser.add_argument('--result-sha',
default=None,
help='Override the result sha')
targets = parser.add_mutually_exclusive_group()
targets.add_argument('--targets',
type=regex,
default='^incidental_',
help='regex for targets to analyze, default: %(default)s')
targets.add_argument('--plugin-path',
help='path to plugin to report incidental coverage on')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
return args
def optional_directory(value):
if not os.path.exists(value):
return value
return directory(value)
def directory(value):
if not os.path.isdir(value):
raise argparse.ArgumentTypeError('"%s" is not a directory' % value)
return value
def regex(value):
try:
return re.compile(value)
except Exception as ex:
raise argparse.ArgumentTypeError('"%s" is not a valid regex: %s' % (value, ex))
def incidental_report(args):
"""Generate incidental coverage report."""
ct = CoverageTool()
git = Git(os.path.abspath(args.source))
coverage_data = CoverageData(os.path.abspath(args.result))
result_sha = args.result_sha or coverage_data.result_sha
try:
git.show([result_sha, '--'])
except subprocess.CalledProcessError:
raise ApplicationError('%s: commit not found: %s\n'
'make sure your source repository is up-to-date' % (git.path, result_sha))
if coverage_data.result != "succeeded":
check_failed(args, 'results indicate tests did not pass (result: %s)\n'
're-run until passing, then download the latest results and re-run the report using those results' % coverage_data.result)
if not coverage_data.paths:
raise ApplicationError('no coverage data found\n'
'make sure the downloaded results are from a code coverage run on Azure Pipelines')
# generate a unique subdirectory in the output directory based on the input files being used
path_hash = hashlib.sha256(b'\n'.join(p.encode() for p in coverage_data.paths)).hexdigest()
output_path = os.path.abspath(os.path.join(args.output, path_hash))
data_path = os.path.join(output_path, 'data')
reports_path = os.path.join(output_path, 'reports')
for path in [data_path, reports_path]:
if not os.path.exists(path):
os.makedirs(path)
# combine coverage results into a single file
combined_path = os.path.join(output_path, 'combined.json')
cached(combined_path, args.use_cache, args.verbose,
lambda: ct.combine(coverage_data.paths, combined_path))
with open(combined_path) as combined_file:
combined = json.load(combined_file)
if args.plugin_path:
# reporting on coverage missing from the test target for the specified plugin
# the report will be on a single target
cache_path_format = '%s' + '-for-%s' % os.path.splitext(os.path.basename(args.plugin_path))[0]
target_pattern = '^%s$' % get_target_name_from_plugin_path(args.plugin_path)
include_path = args.plugin_path
missing = True
target_name = get_target_name_from_plugin_path(args.plugin_path)
else:
# reporting on coverage exclusive to the matched targets
# the report can contain multiple targets
cache_path_format = '%s'
target_pattern = args.targets
include_path = None
missing = False
target_name = None
# identify integration test targets to analyze
target_names = sorted(combined['targets'])
incidental_target_names = [target for target in target_names if re.search(target_pattern, target)]
if not incidental_target_names:
if target_name:
# if the plugin has no tests we still want to know what coverage is missing
incidental_target_names = [target_name]
else:
raise ApplicationError('no targets to analyze')
# exclude test support plugins from analysis
# also exclude six, which for an unknown reason reports bogus coverage lines (indicating coverage of comments)
exclude_path = '^(test/support/|lib/ansible/module_utils/six/)'
# process coverage for each target and then generate a report
# save sources for generating a summary report at the end
summary = {}
report_paths = {}
for target_name in incidental_target_names:
cache_name = cache_path_format % target_name
only_target_path = os.path.join(data_path, 'only-%s.json' % cache_name)
cached(only_target_path, args.use_cache, args.verbose,
lambda: ct.filter(combined_path, only_target_path, include_targets=[target_name], include_path=include_path, exclude_path=exclude_path))
without_target_path = os.path.join(data_path, 'without-%s.json' % cache_name)
cached(without_target_path, args.use_cache, args.verbose,
lambda: ct.filter(combined_path, without_target_path, exclude_targets=[target_name], include_path=include_path, exclude_path=exclude_path))
if missing:
source_target_path = missing_target_path = os.path.join(data_path, 'missing-%s.json' % cache_name)
cached(missing_target_path, args.use_cache, args.verbose,
lambda: ct.missing(without_target_path, only_target_path, missing_target_path, only_gaps=True))
else:
source_target_path = exclusive_target_path = os.path.join(data_path, 'exclusive-%s.json' % cache_name)
cached(exclusive_target_path, args.use_cache, args.verbose,
lambda: ct.missing(only_target_path, without_target_path, exclusive_target_path, only_gaps=True))
source_expanded_target_path = os.path.join(os.path.dirname(source_target_path), 'expanded-%s' % os.path.basename(source_target_path))
cached(source_expanded_target_path, args.use_cache, args.verbose,
lambda: ct.expand(source_target_path, source_expanded_target_path))
summary[target_name] = sources = collect_sources(source_expanded_target_path, git, coverage_data, result_sha)
txt_report_path = os.path.join(reports_path, '%s.txt' % cache_name)
cached(txt_report_path, args.use_cache, args.verbose,
lambda: generate_report(sources, txt_report_path, coverage_data, target_name, missing=missing))
report_paths[target_name] = txt_report_path
# provide a summary report of results
for target_name in incidental_target_names:
sources = summary[target_name]
report_path = os.path.relpath(report_paths[target_name])
print('%s: %d arcs, %d lines, %d files - %s' % (
target_name,
sum(len(s.covered_arcs) for s in sources),
sum(len(s.covered_lines) for s in sources),
len(sources),
report_path,
))
if not missing:
sys.stderr.write('NOTE: This report shows only coverage exclusive to the reported targets. '
'As targets are removed, exclusive coverage on the remaining targets will increase.\n')
def get_target_name_from_plugin_path(path): # type: (str) -> str
"""Return the integration test target name for the given plugin path."""
parts = os.path.splitext(path)[0].split(os.path.sep)
plugin_name = parts[-1]
if path.startswith('lib/ansible/modules/'):
plugin_type = None
elif path.startswith('lib/ansible/plugins/'):
plugin_type = parts[3]
elif path.startswith('lib/ansible/module_utils/'):
plugin_type = parts[2]
elif path.startswith('plugins/'):
plugin_type = parts[1]
else:
raise ApplicationError('Cannot determine plugin type from plugin path: %s' % path)
if plugin_type is None:
target_name = plugin_name
else:
target_name = '%s_%s' % (plugin_type, plugin_name)
return target_name
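# For illustration, the mapping above produces target names such as:
#   lib/ansible/modules/ping.py        -> ping
#   lib/ansible/plugins/lookup/env.py  -> lookup_env
#   lib/ansible/module_utils/basic.py  -> module_utils_basic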
class CoverageData:
def __init__(self, result_path):
with open(os.path.join(result_path, 'run.json')) as run_file:
run = json.load(run_file)
self.result_sha = run["resources"]["repositories"]["self"]["version"]
self.result = run['result']
self.github_base_url = 'https://github.com/ansible/ansible/blob/%s/' % self.result_sha
# locate available results
self.paths = sorted(glob.glob(os.path.join(result_path, '*', 'coverage-analyze-targets.json')))
class Git:
def __init__(self, path):
self.git = 'git'
self.path = path
try:
self.show()
except subprocess.CalledProcessError:
raise ApplicationError('%s: not a git repository' % path)
def show(self, args=None):
return self.run(['show'] + (args or []))
def run(self, command):
return subprocess.check_output([self.git] + command, cwd=self.path)
class CoverageTool:
def __init__(self):
self.analyze_cmd = ['ansible-test', 'coverage', 'analyze', 'targets']
def combine(self, input_paths, output_path):
subprocess.check_call(self.analyze_cmd + ['combine'] + input_paths + [output_path])
def filter(self, input_path, output_path, include_targets=None, exclude_targets=None, include_path=None, exclude_path=None):
args = []
if include_targets:
for target in include_targets:
args.extend(['--include-target', target])
if exclude_targets:
for target in exclude_targets:
args.extend(['--exclude-target', target])
if include_path:
args.extend(['--include-path', include_path])
if exclude_path:
args.extend(['--exclude-path', exclude_path])
subprocess.check_call(self.analyze_cmd + ['filter', input_path, output_path] + args)
def missing(self, from_path, to_path, output_path, only_gaps=False):
args = []
if only_gaps:
args.append('--only-gaps')
subprocess.check_call(self.analyze_cmd + ['missing', from_path, to_path, output_path] + args)
def expand(self, input_path, output_path):
subprocess.check_call(self.analyze_cmd + ['expand', input_path, output_path])
class SourceFile:
def __init__(self, path, source, coverage_data, coverage_points):
self.path = path
self.lines = source.decode().splitlines()
self.coverage_data = coverage_data
self.coverage_points = coverage_points
self.github_url = coverage_data.github_base_url + path
is_arcs = ':' in dict(coverage_points).popitem()[0]
if is_arcs:
parse = parse_arc
else:
parse = int
self.covered_points = set(parse(v) for v in coverage_points)
self.covered_arcs = self.covered_points if is_arcs else None
        if is_arcs:
            self.covered_lines = set(abs(p[0]) for p in self.covered_points) | set(abs(p[1]) for p in self.covered_points)
        else:
            self.covered_lines = self.covered_points
def collect_sources(data_path, git, coverage_data, result_sha):
with open(data_path) as data_file:
data = json.load(data_file)
sources = []
for path_coverage in data.values():
for path, path_data in path_coverage.items():
sources.append(SourceFile(path, git.show(['%s:%s' % (result_sha, path)]), coverage_data, path_data))
return sources
def generate_report(sources, report_path, coverage_data, target_name, missing):
output = [
'Target: %s (%s coverage)' % (target_name, 'missing' if missing else 'exclusive'),
'GitHub: %stest/integration/targets/%s' % (coverage_data.github_base_url, target_name),
]
for source in sources:
if source.covered_arcs:
output.extend([
'',
'Source: %s (%d arcs, %d/%d lines):' % (source.path, len(source.covered_arcs), len(source.covered_lines), len(source.lines)),
'GitHub: %s' % source.github_url,
'',
])
else:
output.extend([
'',
'Source: %s (%d/%d lines):' % (source.path, len(source.covered_lines), len(source.lines)),
'GitHub: %s' % source.github_url,
'',
])
last_line_no = 0
for line_no, line in enumerate(source.lines, start=1):
if line_no not in source.covered_lines:
continue
if last_line_no and last_line_no != line_no - 1:
output.append('')
notes = ''
if source.covered_arcs:
from_lines = sorted(p[0] for p in source.covered_points if abs(p[1]) == line_no)
to_lines = sorted(p[1] for p in source.covered_points if abs(p[0]) == line_no)
if from_lines:
notes += ' ### %s -> (here)' % ', '.join(str(from_line) for from_line in from_lines)
if to_lines:
notes += ' ### (here) -> %s' % ', '.join(str(to_line) for to_line in to_lines)
output.append('%4d %s%s' % (line_no, line, notes))
last_line_no = line_no
with open(report_path, 'w') as report_file:
report_file.write('\n'.join(output) + '\n')
def parse_arc(value):
return tuple(int(v) for v in value.split(':'))
def cached(path, use_cache, show_messages, func):
if os.path.exists(path) and use_cache:
if show_messages:
sys.stderr.write('%s: cached\n' % path)
sys.stderr.flush()
return
if show_messages:
sys.stderr.write('%s: generating ... ' % path)
sys.stderr.flush()
func()
if show_messages:
sys.stderr.write('done\n')
sys.stderr.flush()
def check_failed(args, message):
if args.skip_checks:
sys.stderr.write('WARNING: %s\n' % message)
return
raise ApplicationError(message)
class ApplicationError(Exception):
pass
if __name__ == '__main__':
main()
| 16,861 | Python | .py | 348 | 39.287356 | 154 | 0.634067 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,499 | ansible_pytest_coverage.py | ansible_ansible/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_coverage.py |
"""Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`. PYTEST_DONT_REWRITE"""
from __future__ import annotations
def pytest_configure():
"""Configure this pytest plugin."""
try:
if pytest_configure.executed:
return
except AttributeError:
pytest_configure.executed = True
try:
import coverage
except ImportError:
coverage = None
try:
coverage.Coverage
except AttributeError:
coverage = None
if not coverage:
return
import gc
import os
coverage_instances = []
for obj in gc.get_objects():
if isinstance(obj, coverage.Coverage):
coverage_instances.append(obj)
if not coverage_instances:
coverage_config = os.environ.get('COVERAGE_CONF')
if not coverage_config:
return
coverage_output = os.environ.get('COVERAGE_FILE')
if not coverage_output:
return
cov = coverage.Coverage(config_file=coverage_config)
coverage_instances.append(cov)
else:
cov = None
# noinspection PyProtectedMember
os_exit = os._exit # pylint: disable=protected-access
def coverage_exit(*args, **kwargs):
for instance in coverage_instances:
# skip coverage instances which have no collector, or the collector is not the active collector
# this avoids issues with coverage 7.4.0+ when tests create subprocesses which inherit our overridden os._exit method
# pylint: disable=protected-access
if not instance._collector or not instance._collector._collectors or instance._collector != instance._collector._collectors[-1]:
continue
instance.stop()
instance.save()
os_exit(*args, **kwargs)
os._exit = coverage_exit # pylint: disable=protected-access
if cov:
cov.start()
pytest_configure()
| 1,998 | Python | .pyt | 52 | 30.173077 | 147 | 0.656104 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |