| column | dtype | range / values |
|---|---|---|
| id | int64 | 0 – 458k |
| file_name | string | lengths 4 – 119 |
| file_path | string | lengths 14 – 227 |
| content | string | lengths 24 – 9.96M |
| size | int64 | 24 – 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 – 219k |
| avg_line_length | float64 | 2.52 – 4.63M |
| max_line_length | int64 | 5 – 9.91M |
| alphanum_fraction | float64 | 0 – 1 |
| repo_name | string | lengths 7 – 101 |
| repo_stars | int64 | 100 – 139k |
| repo_forks | int64 | 0 – 26.4k |
| repo_open_issues | int64 | 0 – 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |

id: 14,300 | file_name: base.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/base.py

# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations
import ansible.module_utils.compat.typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
class Hardware:
platform = 'Generic'
# FIXME: remove load_on_init when we can
def __init__(self, module, load_on_init=False):
self.module = module
def populate(self, collected_facts=None):
return {}
class HardwareCollector(BaseFactCollector):
name = 'hardware'
_fact_ids = set(['processor',
'processor_cores',
'processor_count',
                     # TODO: mounts isn't exactly hardware
'mounts',
'devices']) # type: t.Set[str]
_fact_class = Hardware
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
if not module:
return {}
# Network munges cached_facts by side effect, so give it a copy
facts_obj = self._fact_class(module)
facts_dict = facts_obj.populate(collected_facts=collected_facts)
return facts_dict
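# A minimal sketch of driving this collector, assuming BaseFactCollector is
# default-constructible and that the module object only needs the two methods
# Hardware subclasses actually call (get_bin_path/run_command); FakeModule is
# a hypothetical stand-in, not part of Ansible:
#
#     >>> class FakeModule:
#     ...     def get_bin_path(self, name, *args, **kwargs): return None
#     ...     def run_command(self, cmd, **kwargs): return (1, '', '')
#     >>> HardwareCollector().collect(module=FakeModule())
#     {}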

size: 2,751 | language: Python | extension: .py | total_lines: 54 | avg_line_length: 45.759259 | max_line_length: 92 | alphanum_fraction: 0.725782
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,301 | file_name: freebsd.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/freebsd.py

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import json
import re
import struct
import time
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.timeout import TimeoutError, timeout
from ansible.module_utils.facts.utils import get_file_content, get_mount_size
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
- uptime_seconds
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
uptime_facts = self.get_uptime_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(mount_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
cpu_facts['processor'] = []
sysctl = self.module.get_bin_path('sysctl')
if sysctl:
rc, out, err = self.module.run_command("%s -n hw.ncpu" % sysctl, check_rc=False)
cpu_facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
try:
rc, dmesg_boot, err = self.module.run_command(self.module.get_bin_path("dmesg"), check_rc=False)
except Exception:
dmesg_boot = ''
for line in dmesg_boot.splitlines():
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
cpu_facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
cpu_facts['processor_cores'] = line.split()[4]
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
sysctl = self.module.get_bin_path('sysctl')
if sysctl:
rc, out, err = self.module.run_command("%s vm.stats" % sysctl, check_rc=False)
for line in out.splitlines():
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = int(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = int(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = int(data[1])
memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
swapinfo = self.module.get_bin_path('swapinfo')
if swapinfo:
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("%s -k" % swapinfo)
lines = out.splitlines()
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
memory_facts['swaptotal_mb'] = int(data[1]) // 1024
memory_facts['swapfree_mb'] = int(data[3]) // 1024
return memory_facts
def get_uptime_facts(self):
# On FreeBSD, the default format is annoying to parse.
# Use -b to get the raw value and decode it.
sysctl_cmd = self.module.get_bin_path('sysctl')
cmd = [sysctl_cmd, '-b', 'kern.boottime']
# We need to get raw bytes, not UTF-8.
rc, out, err = self.module.run_command(cmd, encoding=None)
        # kern.boottime returns seconds and microseconds as two 64-bit
        # fields, but we are only interested in the first field.
struct_format = '@L'
struct_size = struct.calcsize(struct_format)
if rc != 0 or len(out) < struct_size:
return {}
(kern_boottime, ) = struct.unpack(struct_format, out[:struct_size])
return {
'uptime_seconds': int(time.time() - kern_boottime),
}
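    # A worked example of the decode above, assuming a platform where the
    # native unsigned long ('@L') is 8 bytes:
    #
    #     >>> raw = struct.pack('@LL', 1700000000, 123456)  # (tv_sec, tv_usec)
    #     >>> struct.unpack('@L', raw[:struct.calcsize('@L')])[0]
    #     1700000000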
@timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+', ' ', line).split()
mount_statvfs_info = get_mount_size(fields[1])
mount_info = {'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[3]}
mount_info.update(mount_statvfs_info)
mount_facts['mounts'].append(mount_info)
return mount_facts
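    # Example: an /etc/fstab line such as
    #     /dev/ada0p2  /  ufs  rw  1  1
    # splits into fields ['/dev/ada0p2', '/', 'ufs', 'rw', '1', '1'], so the
    # entry starts as {'mount': '/', 'device': '/dev/ada0p2', 'fstype': 'ufs',
    # 'options': 'rw'} before get_mount_size() merges in the statvfs-derived
    # size figures for the mount point.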
def get_device_facts(self):
device_facts = {}
sysdir = '/dev'
device_facts['devices'] = {}
# TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
drives = re.compile(
r"""(?x)(
(?:
ada? # ATA/SATA disk device
|da # SCSI disk device
|a?cd # SCSI CDROM drive
|amrd # AMI MegaRAID drive
|idad # Compaq RAID array
|ipsd # IBM ServeRAID RAID array
|md # md(4) disk device
|mfid # LSI MegaRAID SAS array
|mlxd # Mylex RAID disk
|twed # 3ware ATA RAID array
|vtbd # VirtIO Block Device
)\d+
)
"""
)
slices = re.compile(
r"""(?x)(
(?:
ada? # ATA/SATA disk device
|a?cd # SCSI CDROM drive
|amrd # AMI MegaRAID drive
|da # SCSI disk device
|idad # Compaq RAID array
|ipsd # IBM ServeRAID RAID array
|md # md(4) disk device
|mfid # LSI MegaRAID SAS array
|mlxd # Mylex RAID disk
|twed # 3ware ATA RAID array
|vtbd # VirtIO Block Device
)\d+[ps]\d+\w*
)
"""
)
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d and d.group(1) not in device_facts['devices']:
device_facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
device_facts['devices'][d.group(1)].append(s.group(1))
return device_facts
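    # How the two patterns above cooperate on a typical /dev listing:
    #
    #     >>> drives.match('ada0p2').group(1)
    #     'ada0'
    #     >>> slices.match('ada0p2').group(1)
    #     'ada0p2'
    #     >>> slices.match('ada0') is None
    #     True
    #
    # i.e. partitions (slices) are grouped under their parent disk name.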
def get_dmi_facts(self):
""" learn dmi facts from system
Use dmidecode executable if available"""
dmi_facts = {}
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_vendor': 'bios-vendor',
'bios_version': 'bios-version',
'board_asset_tag': 'baseboard-asset-tag',
'board_name': 'baseboard-product-name',
'board_serial': 'baseboard-serial-number',
'board_vendor': 'baseboard-manufacturer',
'board_version': 'baseboard-version',
'chassis_asset_tag': 'chassis-asset-tag',
'chassis_serial': 'chassis-serial-number',
'chassis_vendor': 'chassis-manufacturer',
'chassis_version': 'chassis-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer',
}
if dmi_bin is None:
dmi_facts = dict.fromkeys(
DMI_DICT.keys(),
'NA'
)
return dmi_facts
for (k, v) in DMI_DICT.items():
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
                # Strip out commented lines (specific to dmidecode output)
# FIXME: why add the fact and then test if it is json?
dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
try:
json.dumps(dmi_facts[k])
except UnicodeDecodeError:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
return dmi_facts
class FreeBSDHardwareCollector(HardwareCollector):
_fact_class = FreeBSDHardware
_platform = 'FreeBSD'

size: 10,021 | language: Python | extension: .py | total_lines: 241 | avg_line_length: 30.219917 | max_line_length: 112 | alphanum_fraction: 0.54748
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,302 | file_name: aix.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/aix.py

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import re
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.utils import get_mount_size
class AIXHardware(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_count
- processor_cores
- processor_threads_per_core
- processor_vcpus
"""
platform = 'AIX'
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
vgs_facts = self.get_vgs_facts()
mount_facts = self.get_mount_facts()
devices_facts = self.get_device_facts()
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(vgs_facts)
hardware_facts.update(mount_facts)
hardware_facts.update(devices_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
cpu_facts['processor'] = []
# FIXME: not clear how to detect multi-sockets
cpu_facts['processor_count'] = 1
rc, out, err = self.module.run_command(
"/usr/sbin/lsdev -Cc processor"
)
if out:
i = 0
for line in out.splitlines():
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
cpu_facts['processor_cores'] = int(i)
rc, out, err = self.module.run_command(
"/usr/sbin/lsattr -El " + cpudev + " -a type"
)
data = out.split(' ')
cpu_facts['processor'] = [data[1]]
cpu_facts['processor_threads_per_core'] = 1
rc, out, err = self.module.run_command(
"/usr/sbin/lsattr -El " + cpudev + " -a smt_threads"
)
if out:
data = out.split(' ')
cpu_facts['processor_threads_per_core'] = int(data[1])
cpu_facts['processor_vcpus'] = (
cpu_facts['processor_cores'] * cpu_facts['processor_threads_per_core']
)
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
for line in out.splitlines():
data = line.split()
if 'memory pages' in line:
pagecount = int(data[0])
if 'free pages' in line:
freecount = int(data[0])
memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swap info. lsps -s output looks like:
        # Total Paging Space   Percent Used
        #       2048MB               1%
        #
rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.splitlines()
data = lines[1].split()
swaptotal_mb = int(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
memory_facts['swaptotal_mb'] = swaptotal_mb
memory_facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) / 100)
return memory_facts
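    # Example of the arithmetic above: an lsps -s data line of '2048MB  1%'
    # gives swaptotal_mb == 2048 and
    # swapfree_mb == int(2048 * (100 - 1) / 100) == 2027.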
def get_dmi_facts(self):
dmi_facts = {}
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
dmi_facts['firmware_version'] = data[1].strip('IBM,')
lsconf_path = self.module.get_bin_path("lsconf")
if lsconf_path:
rc, out, err = self.module.run_command(lsconf_path)
if rc == 0 and out:
for line in out.splitlines():
data = line.split(':')
if 'Machine Serial Number' in line:
dmi_facts['product_serial'] = data[1].strip()
if 'LPAR Info' in line:
dmi_facts['lpar_info'] = data[1].strip()
if 'System Model' in line:
dmi_facts['product_name'] = data[1].strip()
return dmi_facts
def get_vgs_facts(self):
"""
Get vg and pv Facts
rootvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk0 active 546 0 00..00..00..00..00
hdisk1 active 546 113 00..00..00..21..92
realsyncvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk74 active 1999 6 00..00..00..00..06
testvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk105 active 999 838 200..39..199..200..200
hdisk106 active 999 599 200..00..00..199..200
"""
vgs_facts = {}
lsvg_path = self.module.get_bin_path("lsvg")
xargs_path = self.module.get_bin_path("xargs")
cmd = "%s -o | %s %s -p" % (lsvg_path, xargs_path, lsvg_path)
if lsvg_path and xargs_path:
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc == 0 and out:
vgs_facts['vgs'] = {}
for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
vgs_facts['vgs'][m.group(1)] = []
pp_size = 0
cmd = "%s %s" % (lsvg_path, m.group(1))
rc, out, err = self.module.run_command(cmd)
if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)', out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*', m.group(0)):
pv_info = {'pv_name': n.group(1),
'pv_state': n.group(2),
'total_pps': n.group(3),
'free_pps': n.group(4),
'pp_size': pp_size
}
vgs_facts['vgs'][m.group(1)].append(pv_info)
return vgs_facts
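    # Example against the rootvg block shown in the docstring above: the outer
    # finditer yields m.group(1) == 'rootvg', and the inner finditer over
    # m.group(0) then yields
    # n.group(1, 2, 3, 4) == ('hdisk0', 'active', '546', '0')
    # for the first physical volume.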
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
mounts = []
        # AIX has no mtab; the mount command is the only source of this info
        # (short of API calls that return the same data)
mount_path = self.module.get_bin_path('mount')
if mount_path:
rc, mount_out, err = self.module.run_command(mount_path)
if mount_out:
for line in mount_out.split('\n'):
fields = line.split()
if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
if re.match('^/', fields[0]):
# normal mount
mount = fields[1]
mount_info = {'mount': mount,
'device': fields[0],
'fstype': fields[2],
'options': fields[6],
'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
mount_info.update(get_mount_size(mount))
else:
# nfs or cifs based mount
# in case of nfs if no mount options are provided on command line
# add into fields empty string...
if len(fields) < 8:
fields.append("")
mount_info = {'mount': fields[2],
'device': '%s:%s' % (fields[0], fields[1]),
'fstype': fields[3],
'options': fields[7],
'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
mounts.append(mount_info)
mount_facts['mounts'] = mounts
return mount_facts
def get_device_facts(self):
device_facts = {}
device_facts['devices'] = {}
lsdev_cmd = self.module.get_bin_path('lsdev')
lsattr_cmd = self.module.get_bin_path('lsattr')
if lsdev_cmd and lsattr_cmd:
rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
for line in out_lsdev.splitlines():
field = line.split()
device_attrs = {}
device_name = field[0]
device_state = field[1]
device_type = field[2:]
lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
for attr in out_lsattr.splitlines():
attr_fields = attr.split()
attr_name = attr_fields[0]
attr_parameter = attr_fields[1]
device_attrs[attr_name] = attr_parameter
device_facts['devices'][device_name] = {
'state': device_state,
'type': ' '.join(device_type),
'attributes': device_attrs
}
return device_facts
class AIXHardwareCollector(HardwareCollector):
_platform = 'AIX'
_fact_class = AIXHardware

size: 10,681 | language: Python | extension: .py | total_lines: 227 | avg_line_length: 32.929515 | max_line_length: 141 | alphanum_fraction: 0.491934
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,303 | file_name: dragonfly.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/dragonfly.py

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.module_utils.facts.hardware.base import HardwareCollector
from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardware
class DragonFlyHardwareCollector(HardwareCollector):
    # Note: this uses the FreeBSD fact class; there is no DragonFly-specific hardware fact class
_fact_class = FreeBSDHardware
_platform = 'DragonFly'

size: 1,037 | language: Python | extension: .py | total_lines: 21 | avg_line_length: 47.619048 | max_line_length: 87 | alphanum_fraction: 0.794466
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,304 | file_name: netbsd.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/netbsd.py

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import re
import time
from ansible.module_utils.six.moves import reduce
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.timeout import TimeoutError, timeout
from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
from ansible.module_utils.facts.sysctl import get_sysctl
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
- uptime_seconds
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def populate(self, collected_facts=None):
hardware_facts = {}
self.sysctl = get_sysctl(self.module, ['machdep'])
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except TimeoutError:
pass
dmi_facts = self.get_dmi_facts()
uptime_facts = self.get_uptime_facts()
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(mount_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(uptime_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return cpu_facts
cpu_facts['processor'] = []
for line in get_file_lines("/proc/cpuinfo"):
data = line.split(":", 1)
key = data[0].strip()
            # 'model name' is the key on Intel systems; 'Processor' (mind the
            # uppercase P) works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in cpu_facts:
cpu_facts['processor'] = []
cpu_facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
cpu_facts['processor_count'] = len(sockets)
cpu_facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
cpu_facts['processor_count'] = i
cpu_facts['processor_cores'] = 'NA'
return cpu_facts
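    # Example: a /proc/cpuinfo listing two distinct 'physical id' values, each
    # followed by 'cpu cores : 4', leaves sockets == {'0': 4, '1': 4}, so
    # processor_count == 2 and processor_cores == 8. Without any 'physical id'
    # lines, the fallback reports the count of model-name entries and 'NA' cores.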
def get_memory_facts(self):
memory_facts = {}
if not os.access("/proc/meminfo", os.R_OK):
return memory_facts
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memory_facts["%s_mb" % key.lower()] = int(val) // 1024
return memory_facts
@timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if not fstab:
return mount_facts
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+', ' ', line).split()
mount_statvfs_info = get_mount_size(fields[1])
mount_info = {'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[3]}
mount_info.update(mount_statvfs_info)
mount_facts['mounts'].append(mount_info)
return mount_facts
def get_dmi_facts(self):
dmi_facts = {}
# We don't use dmidecode(8) here because:
# - it would add dependency on an external package
        # - dmidecode(8) can only be run as root
# So instead we rely on sysctl(8) to provide us the information on a
# best-effort basis. As a bonus we also get facts on non-amd64/i386
# platforms this way.
sysctl_to_dmi = {
'machdep.dmi.system-product': 'product_name',
'machdep.dmi.system-version': 'product_version',
'machdep.dmi.system-uuid': 'product_uuid',
'machdep.dmi.system-serial': 'product_serial',
'machdep.dmi.system-vendor': 'system_vendor',
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
dmi_facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
return dmi_facts
def get_uptime_facts(self):
# On NetBSD, we need to call sysctl with -n to get this value as an int.
sysctl_cmd = self.module.get_bin_path('sysctl')
if sysctl_cmd is None:
return {}
cmd = [sysctl_cmd, '-n', 'kern.boottime']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
return {}
kern_boottime = out.strip()
if not kern_boottime.isdigit():
return {}
return {
'uptime_seconds': int(time.time() - int(kern_boottime)),
}
class NetBSDHardwareCollector(HardwareCollector):
_fact_class = NetBSDHardware
_platform = 'NetBSD'

size: 6,234 | language: Python | extension: .py | total_lines: 154 | avg_line_length: 31.233766 | max_line_length: 93 | alphanum_fraction: 0.594577
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,305 | file_name: hpux.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/hpux.py

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import re
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
class HPUXHardware(Hardware):
"""
HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def populate(self, collected_facts=None):
hardware_facts = {}
# TODO: very inefficient calls to machinfo,
# should just make one and then deal with finding the data (see facts/sysctl)
# but not going to change unless there is hp/ux for testing
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
hw_facts = self.get_hw_facts()
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(hw_facts)
return hardware_facts
def get_cpu_facts(self, collected_facts=None):
cpu_facts = {}
collected_facts = collected_facts or {}
if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
cpu_facts['processor_count'] = int(out.strip())
# Working with machinfo mess
elif collected_facts.get('ansible_architecture') == 'ia64':
if collected_facts.get('ansible_distribution_version') == "B.11.23":
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
if out:
cpu_facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
if out:
cpu_facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
cpu_facts['processor_cores'] = int(out.strip())
if collected_facts.get('ansible_distribution_version') == "B.11.31":
                # machinfo only returns 'core' strings on B.11.31 releases > 1204
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
if out.strip() == '0':
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
# If hyperthreading is active divide cores by 2
rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +', ' ', out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
cpu_facts['processor_cores'] = int(data[0]) / 2
else:
if len(data) == 1:
cpu_facts['processor_cores'] = cpu_facts['processor_count']
else:
cpu_facts['processor_cores'] = int(data[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
cpu_facts['processor'] = out.strip()
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
cpu_facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
cpu_facts['processor'] = out.strip()
return cpu_facts
def get_memory_facts(self, collected_facts=None):
memory_facts = {}
collected_facts = collected_facts or {}
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +', ' ', out).split(' ')[5].strip())
memory_facts['memfree_mb'] = pagesize * data // 1024 // 1024
if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
try:
rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*', out).groups()[0].strip()
memory_facts['memtotal_mb'] = int(data) // 1024
except AttributeError:
# For systems where memory details aren't sent to syslog or the log has rotated, use parsed
# adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
use_unsafe_shell=True)
if not err:
data = out
memory_facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search(r'Memory[\ :=]*([0-9]*).*MB.*', out).groups()[0].strip()
memory_facts['memtotal_mb'] = int(data)
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
memory_facts['swaptotal_mb'] = int(out.strip())
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().splitlines():
swap += int(re.sub(' +', ' ', line).split(' ')[3].strip())
memory_facts['swapfree_mb'] = swap
return memory_facts
def get_hw_facts(self, collected_facts=None):
hw_facts = {}
collected_facts = collected_facts or {}
rc, out, err = self.module.run_command("model")
hw_facts['model'] = out.strip()
if collected_facts.get('ansible_architecture') == 'ia64':
separator = ':'
if collected_facts.get('ansible_distribution_version') == "B.11.23":
separator = '='
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
hw_facts['firmware_version'] = out.split(separator)[1].strip()
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ", use_unsafe_shell=True)
if rc == 0 and out:
hw_facts['product_serial'] = out.split(separator)[1].strip()
return hw_facts
class HPUXHardwareCollector(HardwareCollector):
_fact_class = HPUXHardware
_platform = 'HP-UX'
required_facts = set(['platform', 'distribution'])

size: 8,504 | language: Python | extension: .py | total_lines: 145 | avg_line_length: 46.413793 | max_line_length: 147 | alphanum_fraction: 0.582944
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,306 | file_name: darwin.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/darwin.py

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import struct
import time
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.sysctl import get_sysctl
class DarwinHardware(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
- uptime_seconds
"""
platform = 'Darwin'
def populate(self, collected_facts=None):
hardware_facts = {}
self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern', 'hw.model'])
mac_facts = self.get_mac_facts()
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
uptime_facts = self.get_uptime_facts()
hardware_facts.update(mac_facts)
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(uptime_facts)
return hardware_facts
def get_system_profile(self):
rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
mac_facts = {}
if 'hw.model' in self.sysctl:
mac_facts['model'] = mac_facts['product_name'] = self.sysctl['hw.model']
mac_facts['osversion'] = self.sysctl['kern.osversion']
mac_facts['osrevision'] = self.sysctl['kern.osrevision']
return mac_facts
def get_cpu_facts(self):
cpu_facts = {}
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
cpu_facts['processor'] = self.sysctl['machdep.cpu.brand_string']
cpu_facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
cpu_facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
cpu_facts['processor_cores'] = self.sysctl['hw.physicalcpu']
cpu_facts['processor_vcpus'] = self.sysctl.get('hw.logicalcpu') or self.sysctl.get('hw.ncpu') or ''
return cpu_facts
def get_memory_facts(self):
memory_facts = {
'memtotal_mb': int(self.sysctl['hw.memsize']) // 1024 // 1024,
'memfree_mb': 0,
}
total_used = 0
page_size = 4096
vm_stat_command = self.module.get_bin_path('vm_stat')
if vm_stat_command is None:
return memory_facts
if vm_stat_command:
rc, out, err = self.module.run_command(vm_stat_command)
if rc == 0:
# Free = Total - (Wired + active + inactive)
# Get a generator of tuples from the command output so we can later
# turn it into a dictionary
memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines())
# Strip extra left spaces from the value
memory_stats = dict((k, v.lstrip()) for k, v in memory_stats)
for k, v in memory_stats.items():
try:
memory_stats[k] = int(v)
except ValueError:
# Most values convert cleanly to integer values but if the field does
# not convert to an integer, just leave it alone.
pass
if memory_stats.get('Pages wired down'):
total_used += memory_stats['Pages wired down'] * page_size
if memory_stats.get('Pages active'):
total_used += memory_stats['Pages active'] * page_size
if memory_stats.get('Pages inactive'):
total_used += memory_stats['Pages inactive'] * page_size
memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024)
return memory_facts
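    # Example with the 4 KiB page size assumed above: vm_stat lines
    # 'Pages wired down: 131072.' and 'Pages active: 262144.' contribute
    # (131072 + 262144) * 4096 bytes to total_used, so memfree_mb ends up as
    # memtotal_mb - 1536.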
def get_uptime_facts(self):
# On Darwin, the default format is annoying to parse.
# Use -b to get the raw value and decode it.
sysctl_cmd = self.module.get_bin_path('sysctl')
if not sysctl_cmd:
return {}
cmd = [sysctl_cmd, '-b', 'kern.boottime']
# We need to get raw bytes, not UTF-8.
rc, out, err = self.module.run_command(cmd, encoding=None)
        # kern.boottime returns seconds and microseconds as two 64-bit
        # fields, but we are only interested in the first field.
struct_format = '@L'
struct_size = struct.calcsize(struct_format)
if rc != 0 or len(out) < struct_size:
return {}
(kern_boottime, ) = struct.unpack(struct_format, out[:struct_size])
return {
'uptime_seconds': int(time.time() - kern_boottime),
}
class DarwinHardwareCollector(HardwareCollector):
_fact_class = DarwinHardware
_platform = 'Darwin'

size: 5,899 | language: Python | extension: .py | total_lines: 128 | avg_line_length: 36.484375 | max_line_length: 118 | alphanum_fraction: 0.609097
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,307 | file_name: openbsd.py | file_path: ansible_ansible/lib/ansible/module_utils/facts/hardware/openbsd.py

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import re
import time
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts import timeout
from ansible.module_utils.facts.utils import get_file_content, get_mount_size
from ansible.module_utils.facts.sysctl import get_sysctl
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
- uptime_seconds
    In addition, it also defines a number of DMI facts and device facts.
"""
platform = 'OpenBSD'
def populate(self, collected_facts=None):
hardware_facts = {}
self.sysctl = get_sysctl(self.module, ['hw'])
hardware_facts.update(self.get_processor_facts())
hardware_facts.update(self.get_memory_facts())
hardware_facts.update(self.get_device_facts())
hardware_facts.update(self.get_dmi_facts())
hardware_facts.update(self.get_uptime_facts())
# storage devices notoriously prone to hang/block so they are under a timeout
try:
hardware_facts.update(self.get_mount_facts())
except timeout.TimeoutError:
pass
return hardware_facts
@timeout.timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+', ' ', line).split()
if fields[1] == 'none' or fields[3] == 'xx':
continue
mount_statvfs_info = get_mount_size(fields[1])
mount_info = {'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[3]}
mount_info.update(mount_statvfs_info)
mount_facts['mounts'].append(mount_info)
return mount_facts
def get_memory_facts(self):
memory_facts = {}
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = self.module.run_command("/usr/bin/vmstat")
if rc == 0:
memory_facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
memory_facts['memtotal_mb'] = int(self.sysctl['hw.physmem']) // 1024 // 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = self.module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = {ord(u'k'): None,
ord(u'm'): None,
ord(u'g'): None}
data = to_text(out, errors='surrogate_or_strict').split()
memory_facts['swapfree_mb'] = int(data[-2].translate(swaptrans)) // 1024
memory_facts['swaptotal_mb'] = int(data[1].translate(swaptrans)) // 1024
return memory_facts
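    # Example of the older-format path above: for
    #     total: 69268k bytes allocated = 0k used, 69268k available
    # data[1] == data[-2] == '69268k'; the translate table deletes the
    # 'k'/'m'/'g' suffix, so both swap figures become 69268 // 1024 == 67 (MB).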
def get_uptime_facts(self):
        # On OpenBSD, we need to call sysctl with -n to get this value as an int.
sysctl_cmd = self.module.get_bin_path('sysctl')
if sysctl_cmd is None:
return {}
cmd = [sysctl_cmd, '-n', 'kern.boottime']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
return {}
kern_boottime = out.strip()
if not kern_boottime.isdigit():
return {}
return {
'uptime_seconds': int(time.time() - int(kern_boottime)),
}
def get_processor_facts(self):
cpu_facts = {}
processor = []
for i in range(int(self.sysctl['hw.ncpuonline'])):
processor.append(self.sysctl['hw.model'])
cpu_facts['processor'] = processor
# The following is partly a lie because there is no reliable way to
# determine the number of physical CPUs in the system. We can only
# query the number of logical CPUs, which hides the number of cores.
# On amd64/i386 we could try to inspect the smt/core/package lines in
# dmesg, however even those have proven to be unreliable.
# So take a shortcut and report the logical number of processors in
# 'processor_count' and 'processor_cores' and leave it at that.
cpu_facts['processor_count'] = self.sysctl['hw.ncpuonline']
cpu_facts['processor_cores'] = self.sysctl['hw.ncpuonline']
return cpu_facts
def get_device_facts(self):
device_facts = {}
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
device_facts['devices'] = devices
return device_facts
def get_dmi_facts(self):
dmi_facts = {}
# We don't use dmidecode(8) here because:
# - it would add dependency on an external package
        # - dmidecode(8) can only be run as root
# So instead we rely on sysctl(8) to provide us the information on a
# best-effort basis. As a bonus we also get facts on non-amd64/i386
# platforms this way.
sysctl_to_dmi = {
'hw.product': 'product_name',
'hw.version': 'product_version',
'hw.uuid': 'product_uuid',
'hw.serialno': 'product_serial',
'hw.vendor': 'system_vendor',
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
dmi_facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
return dmi_facts
class OpenBSDHardwareCollector(HardwareCollector):
_fact_class = OpenBSDHardware
_platform = 'OpenBSD'

size: 6,934 | language: Python | extension: .py | total_lines: 153 | avg_line_length: 36.235294 | max_line_length: 89 | alphanum_fraction: 0.609218
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,308 | file_name: datetime.py | file_path: ansible_ansible/lib/ansible/module_utils/compat/datetime.py

# Copyright (c) 2023 Ansible
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import annotations
from ansible.module_utils.six import PY3
import datetime
if PY3:
UTC = datetime.timezone.utc
else:
_ZERO = datetime.timedelta(0)
class _UTC(datetime.tzinfo):
__slots__ = ()
def utcoffset(self, dt):
return _ZERO
def dst(self, dt):
return _ZERO
def tzname(self, dt):
return "UTC"
UTC = _UTC()
def utcfromtimestamp(timestamp): # type: (float) -> datetime.datetime
"""Construct an aware UTC datetime from a POSIX timestamp."""
return datetime.datetime.fromtimestamp(timestamp, UTC)
def utcnow(): # type: () -> datetime.datetime
"""Construct an aware UTC datetime from time.time()."""
return datetime.datetime.now(UTC)
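# Example usage of the two helpers above (on Python 3, where UTC is
# datetime.timezone.utc):
#
#     >>> utcfromtimestamp(0)
#     datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)
#     >>> utcnow().tzinfo is UTC
#     True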

size: 901 | language: Python | extension: .py | total_lines: 24 | avg_line_length: 31.791667 | max_line_length: 106 | alphanum_fraction: 0.67555
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,309 | file_name: paramiko.py | file_path: ansible_ansible/lib/ansible/module_utils/compat/paramiko.py

# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import annotations
import types # pylint: disable=unused-import
import warnings
PARAMIKO_IMPORT_ERR = None
try:
with warnings.catch_warnings():
# Blowfish has been moved, but the deprecated import is used by paramiko versions older than 2.9.5.
# See: https://github.com/paramiko/paramiko/pull/2039
warnings.filterwarnings('ignore', message='Blowfish has been ', category=UserWarning)
# TripleDES has been moved, but the deprecated import is used by paramiko versions older than 3.3.2 and 3.4.1.
# See: https://github.com/paramiko/paramiko/pull/2421
warnings.filterwarnings('ignore', message='TripleDES has been ', category=UserWarning)
import paramiko # pylint: disable=unused-import
# paramiko and gssapi are incompatible and raise AttributeError not ImportError
# When running in FIPS mode, cryptography raises InternalError
# https://bugzilla.redhat.com/show_bug.cgi?id=1778939
except Exception as err:
paramiko = None # type: types.ModuleType | None # type: ignore[no-redef]
PARAMIKO_IMPORT_ERR = err
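# A sketch of one way a caller can consume this shim, checking the sentinel
# before touching the (possibly absent) module:
#
#     >>> from ansible.module_utils.compat.paramiko import paramiko, PARAMIKO_IMPORT_ERR
#     >>> if paramiko is None:
#     ...     print('paramiko is unavailable: %r' % PARAMIKO_IMPORT_ERR)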

size: 1,265 | language: Python | extension: .py | total_lines: 22 | avg_line_length: 53.227273 | max_line_length: 118 | alphanum_fraction: 0.74657
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,310 | file_name: selinux.py | file_path: ansible_ansible/lib/ansible/module_utils/compat/selinux.py

# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import sys
from ansible.module_utils.common.text.converters import to_native, to_bytes
from ctypes import CDLL, c_char_p, c_int, byref, POINTER, get_errno
try:
_selinux_lib = CDLL('libselinux.so.1', use_errno=True)
except OSError as ex:
raise ImportError('unable to load libselinux.so') from ex
def _module_setup():
def _check_rc(rc):
if rc < 0:
errno = get_errno()
raise OSError(errno, os.strerror(errno))
return rc
binary_char_type = type(b'')
class _to_char_p:
@classmethod
def from_param(cls, strvalue):
if strvalue is not None and not isinstance(strvalue, binary_char_type):
strvalue = to_bytes(strvalue)
return strvalue
# FIXME: swap restype to errcheck
_funcmap = dict(
is_selinux_enabled={},
is_selinux_mls_enabled={},
lgetfilecon_raw=dict(argtypes=[_to_char_p, POINTER(c_char_p)], restype=_check_rc),
        # NB: matchpathcon is deprecated and should be rewritten on top of selabel_lookup (but that will be a PITA)
matchpathcon=dict(argtypes=[_to_char_p, c_int, POINTER(c_char_p)], restype=_check_rc),
security_policyvers={},
selinux_getenforcemode=dict(argtypes=[POINTER(c_int)]),
security_getenforce={},
lsetfilecon=dict(argtypes=[_to_char_p, _to_char_p], restype=_check_rc),
selinux_getpolicytype=dict(argtypes=[POINTER(c_char_p)], restype=_check_rc),
)
_thismod = sys.modules[__name__]
for fname, cfg in _funcmap.items():
fn = getattr(_selinux_lib, fname, None)
if not fn:
raise ImportError('missing selinux function: {0}'.format(fname))
# all ctypes pointers share the same base type
base_ptr_type = type(POINTER(c_int))
fn.argtypes = cfg.get('argtypes', None)
fn.restype = cfg.get('restype', c_int)
# just patch simple directly callable functions directly onto the module
if not fn.argtypes or not any(argtype for argtype in fn.argtypes if type(argtype) is base_ptr_type):
setattr(_thismod, fname, fn)
continue
# NB: this validation code must run after all the wrappers have been declared
unimplemented_funcs = set(_funcmap).difference(dir(_thismod))
if unimplemented_funcs:
raise NotImplementedError('implementation is missing functions: {0}'.format(unimplemented_funcs))
# begin wrapper function impls
def selinux_getenforcemode():
enforcemode = c_int()
rc = _selinux_lib.selinux_getenforcemode(byref(enforcemode))
return [rc, enforcemode.value]
def selinux_getpolicytype():
con = c_char_p()
try:
rc = _selinux_lib.selinux_getpolicytype(byref(con))
return [rc, to_native(con.value)]
finally:
_selinux_lib.freecon(con)
def lgetfilecon_raw(path):
con = c_char_p()
try:
rc = _selinux_lib.lgetfilecon_raw(path, byref(con))
return [rc, to_native(con.value)]
finally:
_selinux_lib.freecon(con)
def matchpathcon(path, mode):
con = c_char_p()
try:
rc = _selinux_lib.matchpathcon(path, mode, byref(con))
return [rc, to_native(con.value)]
finally:
_selinux_lib.freecon(con)
_module_setup()
del _module_setup
# end wrapper function impls

size: 3,488 | language: Python | extension: .py | total_lines: 83 | avg_line_length: 35.180723 | max_line_length: 108 | alphanum_fraction: 0.665284
repo: ansible/ansible | stars: 62,258 | forks: 23,791 | open_issues: 861 | license: GPL-3.0 | extracted: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,311 | file_name: version.py | file_path: ansible_ansible/lib/ansible/module_utils/compat/version.py

# Vendored copy of distutils/version.py from CPython 3.9.5
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* _cmp compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
from __future__ import annotations
import re
try:
RE_FLAGS = re.VERBOSE | re.ASCII # type: ignore[attr-defined]
except AttributeError:
RE_FLAGS = re.VERBOSE
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes; and route
rich comparisons to _cmp.
"""
def __init__(self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__(self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
def __eq__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c == 0
def __lt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c >= 0
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# _cmp (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion(Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
RE_FLAGS)
def parse(self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError("invalid version number '%s'" % vstring)
(major, minor, patch, prerelease, prerelease_num) = \
match.group(1, 2, 4, 5, 6)
if patch:
self.version = tuple(map(int, [major, minor, patch]))
else:
self.version = tuple(map(int, [major, minor])) + (0,)
if prerelease:
self.prerelease = (prerelease[0], int(prerelease_num))
else:
self.prerelease = None
def __str__(self):
if self.version[2] == 0:
vstring = '.'.join(map(str, self.version[0:2]))
else:
vstring = '.'.join(map(str, self.version))
if self.prerelease:
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
return vstring
def _cmp(self, other):
if isinstance(other, str):
other = StrictVersion(other)
elif not isinstance(other, StrictVersion):
return NotImplemented
if self.version != other.version:
# numeric versions don't match
# prerelease stuff doesn't matter
if self.version < other.version:
return -1
else:
return 1
# have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
if self.prerelease == other.prerelease:
return 0
elif self.prerelease < other.prerelease:
return -1
else:
return 1
else:
raise AssertionError("never get here")
# end class StrictVersion
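# Example comparisons under the StrictVersion rules above: a pre-release tag
# sorts before the bare release, and a missing third component counts as 0:
#
#     >>> StrictVersion('1.0.4a3') < StrictVersion('1.0.4')
#     True
#     >>> StrictVersion('0.4') == StrictVersion('0.4.0')
#     True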
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking them.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion(Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__(self, vstring=None):
if vstring:
self.parse(vstring)
def parse(self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = [x for x in self.component_re.split(vstring) if x and x != '.']
for i, obj in enumerate(components):
try:
components[i] = int(obj)
except ValueError:
pass
self.version = components
def __str__(self):
return self.vstring
def __repr__(self):
return "LooseVersion ('%s')" % str(self)
def _cmp(self, other):
if isinstance(other, str):
other = LooseVersion(other)
elif not isinstance(other, LooseVersion):
return NotImplemented
if self.version == other.version:
return 0
if self.version < other.version:
return -1
if self.version > other.version:
return 1
# end class LooseVersion
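# Illustrative sketch (not part of the original module): the comparison
# semantics described in the comments above, exercised on a few concrete
# version strings. Runs only when this file is executed directly.
if __name__ == '__main__':
    # post-release suffixes compare as intended under tuple/lexical ordering
    assert LooseVersion('0.99') < LooseVersion('0.99pl14') < LooseVersion('1.0')
    # pre-release suffixes do not: '1.5.2a2' sorts *after* '1.5.2' here
    assert LooseVersion('1.5.2a2') > LooseVersion('1.5.2')
    # StrictVersion does understand pre-release tags, at the cost of strictness
    assert StrictVersion('1.5.2a2') < StrictVersion('1.5.2')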
| 12,734 | Python | .py | 292 | 37.280822 | 90 | 0.660265 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,312 | importlib.py | ansible_ansible/lib/ansible/module_utils/compat/importlib.py |
# Copyright (c) 2020 Matt Martz <matt@sivel.net>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import annotations
from ansible.module_utils.common.warnings import deprecate
def __getattr__(importable_name):
"""Inject import-time deprecation warnings.
Specifically, for ``import_module()``.
"""
if importable_name == 'import_module':
deprecate(
msg=f'The `ansible.module_utils.compat.importlib.'
f'{importable_name}` function is deprecated.',
version='2.19',
)
from importlib import import_module
return import_module
raise AttributeError(
f'cannot import name {importable_name !r} '
f'from {__file__ !s}',
)
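# Illustrative usage (not part of the original module): thanks to the
# PEP 562 module-level __getattr__ above, the deprecated name still
# resolves, but fetching it first emits a warning via deprecate().
#
#     from ansible.module_utils.compat.importlib import import_module
#     mod = import_module('json')  # works; a deprecation warning was issued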
| 813 | Python | .py | 20 | 33.95 | 106 | 0.668361 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,313 | typing.py | ansible_ansible/lib/ansible/module_utils/compat/typing.py |
"""Compatibility layer for the `typing` module, providing all Python versions access to the newest type-hinting features."""
from __future__ import annotations
# pylint: disable=wildcard-import,unused-wildcard-import
# catch *all* exceptions to prevent type annotation support module bugs causing runtime failures
# (eg, https://github.com/ansible/ansible/issues/77857)
try:
from typing_extensions import *
except Exception: # pylint: disable=broad-except
pass
try:
from typing import * # type: ignore[assignment,no-redef]
except Exception: # pylint: disable=broad-except
pass
try:
cast # type: ignore[used-before-def]
except NameError:
def cast(typ, val): # type: ignore[no-redef]
return val
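# Illustrative note (not part of the original module): whichever import
# above succeeded, `cast` is always available, and like typing.cast it
# performs no runtime conversion or check:
#
#     port = cast(int, '8080')  # type checkers see int; value is still '8080'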
| 736 | Python | .py | 18 | 37.777778 | 124 | 0.755618 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,314 | config.py | ansible_ansible/lib/ansible/cli/config.py |
#!/usr/bin/env python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import os
import shlex
import subprocess
import sys
import yaml
from collections.abc import Mapping
from ansible import context
import ansible.plugins.loader as plugin_loader
from ansible import constants as C
from ansible.cli.arguments import option_helpers as opt_help
from ansible.config.manager import ConfigManager, Setting
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleRequiredOptionError
from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
from ansible.module_utils.common.json import json_dump
from ansible.module_utils.six import string_types
from ansible.parsing.quoting import is_quoted
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
display = Display()
_IGNORE_CHANGED = frozenset({'_terms', '_input'})
def yaml_dump(data, default_flow_style=False, default_style=None):
return yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=default_flow_style, default_style=default_style)
def yaml_short(data):
return yaml_dump(data, default_flow_style=True, default_style="''")
def get_constants():
""" helper method to ensure we can template based on existing constants """
if not hasattr(get_constants, 'cvars'):
get_constants.cvars = {k: getattr(C, k) for k in dir(C) if not k.startswith('__')}
return get_constants.cvars
def _ansible_env_vars(varname):
""" return true or false depending if variable name is possibly a 'configurable' ansible env variable """
return all(
[
varname.startswith("ANSIBLE_"),
not varname.startswith(("ANSIBLE_TEST_", "ANSIBLE_LINT_")),
varname not in ("ANSIBLE_CONFIG", "ANSIBLE_DEV_HOME"),
]
)
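# For example (illustrative), per the rules above:
#   _ansible_env_vars('ANSIBLE_TIMEOUT')   -> True
#   _ansible_env_vars('ANSIBLE_TEST_FOO')  -> False (test-tooling prefix)
#   _ansible_env_vars('ANSIBLE_CONFIG')    -> False (explicitly excluded)
#   _ansible_env_vars('EDITOR')            -> False (no ANSIBLE_ prefix)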
def _get_evar_list(settings):
data = []
for setting in settings:
if 'env' in settings[setting] and settings[setting]['env']:
for varname in settings[setting]['env']:
data.append(varname.get('name'))
return data
def _get_ini_entries(settings):
data = {}
for setting in settings:
if 'ini' in settings[setting] and settings[setting]['ini']:
for kv in settings[setting]['ini']:
if not kv['section'] in data:
data[kv['section']] = set()
data[kv['section']].add(kv['key'])
return data
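# The result maps each INI section name to the set of known keys in it,
# e.g. (illustrative shape only):
#   {'defaults': {'forks', 'timeout'}, 'ssh_connection': {'pipelining'}}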
class ConfigCLI(CLI):
""" Config command line class """
name = 'ansible-config'
def __init__(self, args, callback=None):
self.config_file = None
self.config = None
super(ConfigCLI, self).__init__(args, callback)
def init_parser(self):
super(ConfigCLI, self).init_parser(
desc="View ansible configuration.",
)
common = opt_help.ArgumentParser(add_help=False)
opt_help.add_verbosity_options(common)
common.add_argument('-c', '--config', dest='config_file',
help="path to configuration file, defaults to first file found in precedence.")
common.add_argument("-t", "--type", action="store", default='base', dest='type', choices=['all', 'base'] + list(C.CONFIGURABLE_PLUGINS),
help="Filter down to a specific plugin type.")
common.add_argument('args', help='Specific plugin to target, requires type of plugin to be set', nargs='*')
subparsers = self.parser.add_subparsers(dest='action')
subparsers.required = True
list_parser = subparsers.add_parser('list', help='Print all config options', parents=[common])
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml'], default='yaml',
help='Output format for list')
dump_parser = subparsers.add_parser('dump', help='Dump configuration', parents=[common])
dump_parser.set_defaults(func=self.execute_dump)
dump_parser.add_argument('--only-changed', '--changed-only', dest='only_changed', action='store_true',
help="Only show configurations that have changed from the default")
dump_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml', 'display'], default='display',
help='Output format for dump')
view_parser = subparsers.add_parser('view', help='View configuration file', parents=[common])
view_parser.set_defaults(func=self.execute_view)
init_parser = subparsers.add_parser('init', help='Create initial configuration', parents=[common])
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env', 'vars'], default='ini',
help='Output format for init')
init_parser.add_argument('--disabled', dest='commented', action='store_true', default=False,
help='Prefixes all entries with a comment character to disable them')
validate_parser = subparsers.add_parser('validate',
help='Validate the configuration file and environment variables. '
'By default it only checks the base settings without accounting for plugins (see -t).',
parents=[common])
validate_parser.set_defaults(func=self.execute_validate)
validate_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env'], default='ini',
help='Output format for validate')
def post_process_args(self, options):
options = super(ConfigCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(ConfigCLI, self).run()
# initialize each galaxy server's options from known listed servers
self._galaxy_servers = [s for s in C.GALAXY_SERVER_LIST or [] if s] # clean list, reused later here
C.config.load_galaxy_server_defs(self._galaxy_servers)
if context.CLIARGS['config_file']:
self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
b_config = to_bytes(self.config_file)
if os.path.exists(b_config) and os.access(b_config, os.R_OK):
self.config = ConfigManager(self.config_file)
else:
raise AnsibleOptionsError('The provided configuration file is missing or not accessible: %s' % to_native(self.config_file))
else:
self.config = C.config
self.config_file = self.config._config_file
if self.config_file:
try:
if not os.path.exists(self.config_file):
raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
elif not os.path.isfile(self.config_file):
raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))
os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
except Exception:
if context.CLIARGS['action'] in ['view']:
raise
elif context.CLIARGS['action'] in ['edit', 'update']:
display.warning("File does not exist, used empty file: %s" % self.config_file)
elif context.CLIARGS['action'] == 'view':
raise AnsibleError('Invalid or no config file was supplied')
# run the requested action
context.CLIARGS['func']()
def execute_update(self):
"""
Updates a single setting in the specified ansible.cfg
"""
raise AnsibleError("Option not implemented yet")
# pylint: disable=unreachable
if context.CLIARGS['setting'] is None:
raise AnsibleOptionsError("update option requires a setting to update")
(entry, value) = context.CLIARGS['setting'].split('=')
if '.' in entry:
(section, option) = entry.split('.')
else:
section = 'defaults'
option = entry
subprocess.call([
'ansible',
'-m', 'ini_file',
'localhost',
'-c', 'local',
'-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
])
def execute_view(self):
"""
Displays the current config file
"""
try:
with open(self.config_file, 'rb') as f:
self.pager(to_text(f.read(), errors='surrogate_or_strict'))
except Exception as e:
raise AnsibleError("Failed to open config file: %s" % to_native(e))
def execute_edit(self):
"""
Opens ansible.cfg in the default EDITOR
"""
raise AnsibleError("Option not implemented yet")
# pylint: disable=unreachable
try:
editor = shlex.split(C.config.get_config_value('EDITOR'))
editor.append(self.config_file)
subprocess.call(editor)
except Exception as e:
raise AnsibleError("Failed to open editor: %s" % to_native(e))
def _list_plugin_settings(self, ptype, plugins=None):
entries = {}
loader = getattr(plugin_loader, '%s_loader' % ptype)
# build list
if plugins:
plugin_cs = []
for plugin in plugins:
p = loader.get(plugin, class_only=True)
if p is None:
display.warning("Skipping %s as we could not find matching plugin" % plugin)
else:
plugin_cs.append(p)
else:
plugin_cs = loader.all(class_only=True)
# iterate over class instances
for plugin in plugin_cs:
finalname = name = plugin._load_name
if name.startswith('_'):
# alias or deprecated
if os.path.islink(plugin._original_path):
continue
else:
finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
entries[finalname] = self.config.get_configuration_definitions(ptype, name)
return entries
def _list_entries_from_args(self):
"""
build a dict with the list requested configs
"""
config_entries = {}
if context.CLIARGS['type'] in ('base', 'all'):
# this dumps main/common configs
config_entries = self.config.get_configuration_definitions(ignore_private=True)
# for base and all, we include galaxy servers
config_entries['GALAXY_SERVERS'] = {}
for server in self._galaxy_servers:
config_entries['GALAXY_SERVERS'][server] = self.config.get_configuration_definitions('galaxy_server', server)
if context.CLIARGS['type'] != 'base':
config_entries['PLUGINS'] = {}
if context.CLIARGS['type'] == 'all':
# now each plugin type
for ptype in C.CONFIGURABLE_PLUGINS:
config_entries['PLUGINS'][ptype.upper()] = self._list_plugin_settings(ptype)
elif context.CLIARGS['type'] != 'base':
# only for requested types
config_entries['PLUGINS'][context.CLIARGS['type']] = self._list_plugin_settings(context.CLIARGS['type'], context.CLIARGS['args'])
return config_entries
def execute_list(self):
"""
list and output available configs
"""
config_entries = self._list_entries_from_args()
if context.CLIARGS['format'] == 'yaml':
output = yaml_dump(config_entries)
elif context.CLIARGS['format'] == 'json':
output = json_dump(config_entries)
self.pager(to_text(output, errors='surrogate_or_strict'))
def _get_settings_vars(self, settings, subkey):
data = []
if context.CLIARGS['commented']:
prefix = '#'
else:
prefix = ''
for setting in settings:
if not settings[setting].get('description'):
continue
default = self.config.template_default(settings[setting].get('default', ''), get_constants())
if subkey == 'env':
stype = settings[setting].get('type', '')
if stype == 'boolean':
if default:
default = '1'
else:
default = '0'
elif default:
if stype == 'list':
if not isinstance(default, string_types):
# python lists are not valid env ones
try:
default = ', '.join(default)
except Exception as e:
# list of other stuff
default = '%s' % to_native(default)
if isinstance(default, string_types) and not is_quoted(default):
default = shlex.quote(default)
elif default is None:
default = ''
if subkey in settings[setting] and settings[setting][subkey]:
entry = settings[setting][subkey][-1]['name']
if isinstance(settings[setting]['description'], string_types):
desc = settings[setting]['description']
else:
desc = '\n#'.join(settings[setting]['description'])
name = settings[setting].get('name', setting)
data.append('# %s(%s): %s' % (name, settings[setting].get('type', 'string'), desc))
# TODO: might need quoting and value coercion depending on type
if subkey == 'env':
if entry.startswith('_ANSIBLE_'):
continue
data.append('%s%s=%s' % (prefix, entry, default))
elif subkey == 'vars':
if entry.startswith('_ansible_'):
continue
data.append(prefix + '%s: %s' % (entry, to_text(yaml_short(default), errors='surrogate_or_strict')))
data.append('')
return data
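# Sample 'env' output assembled above (illustrative; real descriptions and
# defaults come from the config definitions):
#   # DEFAULT_FORKS(integer): Maximum number of forks Ansible will use ...
#   ANSIBLE_FORKS=5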
def _get_settings_ini(self, settings, seen):
sections = {}
for o in sorted(settings.keys()):
opt = settings[o]
if not isinstance(opt, Mapping):
# recursed into one of the few settings that is a mapping, now hitting its strings
continue
if not opt.get('description'):
# it's a plugin
new_sections = self._get_settings_ini(opt, seen)
for s in new_sections:
if s in sections:
sections[s].extend(new_sections[s])
else:
sections[s] = new_sections[s]
continue
if isinstance(opt['description'], string_types):
desc = '# (%s) %s' % (opt.get('type', 'string'), opt['description'])
else:
desc = "# (%s) " % opt.get('type', 'string')
desc += "\n# ".join(opt['description'])
if 'ini' in opt and opt['ini']:
entry = opt['ini'][-1]
if entry['section'] not in seen:
seen[entry['section']] = []
if entry['section'] not in sections:
sections[entry['section']] = []
# avoid dupes
if entry['key'] not in seen[entry['section']]:
seen[entry['section']].append(entry['key'])
default = self.config.template_default(opt.get('default', ''), get_constants())
if opt.get('type', '') == 'list' and not isinstance(default, string_types):
# python lists are not valid ini ones
default = ', '.join(default)
elif default is None:
default = ''
if context.CLIARGS.get('commented', False):
entry['key'] = ';%s' % entry['key']
key = desc + '\n%s=%s' % (entry['key'], default)
sections[entry['section']].append(key)
return sections
def execute_init(self):
"""Create initial configuration"""
seen = {}
data = []
config_entries = self._list_entries_from_args()
plugin_types = config_entries.pop('PLUGINS', None)
if context.CLIARGS['format'] == 'ini':
sections = self._get_settings_ini(config_entries, seen)
if plugin_types:
for ptype in plugin_types:
plugin_sections = self._get_settings_ini(plugin_types[ptype], seen)
for s in plugin_sections:
if s in sections:
sections[s].extend(plugin_sections[s])
else:
sections[s] = plugin_sections[s]
if sections:
for section in sections.keys():
data.append('[%s]' % section)
for key in sections[section]:
data.append(key)
data.append('')
data.append('')
elif context.CLIARGS['format'] in ('env', 'vars'): # TODO: add yaml once that config option is added
data = self._get_settings_vars(config_entries, context.CLIARGS['format'])
if plugin_types:
for ptype in plugin_types:
for plugin in plugin_types[ptype].keys():
data.extend(self._get_settings_vars(plugin_types[ptype][plugin], context.CLIARGS['format']))
self.pager(to_text('\n'.join(data), errors='surrogate_or_strict'))
def _render_settings(self, config):
entries = []
for setting in sorted(config):
changed = (config[setting].origin not in ('default', 'REQUIRED') and setting not in _IGNORE_CHANGED)
if context.CLIARGS['format'] == 'display':
if isinstance(config[setting], Setting):
# proceed normally
value = config[setting].value
if config[setting].origin == 'default' or setting in _IGNORE_CHANGED:
color = 'green'
value = self.config.template_default(value, get_constants())
elif config[setting].origin == 'REQUIRED':
# should include '_terms', '_input', etc
color = 'red'
else:
color = 'yellow'
msg = "%s(%s) = %s" % (setting, config[setting].origin, value)
else:
color = 'green'
msg = "%s(%s) = %s" % (setting, 'default', config[setting].get('default'))
entry = stringc(msg, color)
else:
entry = {}
for key in config[setting]._fields:
if key == 'type':
continue
entry[key] = getattr(config[setting], key)
if not context.CLIARGS['only_changed'] or changed:
entries.append(entry)
return entries
def _get_global_configs(self):
# Add base
config = self.config.get_configuration_definitions(ignore_private=True)
# convert to settings
for setting in config.keys():
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants())
config[setting] = Setting(setting, v, o, None)
return self._render_settings(config)
def _get_plugin_configs(self, ptype, plugins):
# prep loading
loader = getattr(plugin_loader, '%s_loader' % ptype)
# accumulators
output = []
config_entries = {}
# build list
if plugins:
plugin_cs = []
for plugin in plugins:
p = loader.get(plugin, class_only=True)
if p is None:
display.warning("Skipping %s as we could not find matching plugin" % plugin)
else:
plugin_cs.append(loader.get(plugin, class_only=True))
else:
plugin_cs = loader.all(class_only=True)
for plugin in plugin_cs:
# in case of deprecation they diverge
finalname = name = plugin._load_name
if name.startswith('_'):
if os.path.islink(plugin._original_path):
# skip alias
continue
# deprecated, but use 'nice name'
finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
# default entries per plugin
config_entries[finalname] = self.config.get_configuration_definitions(ptype, name)
try:
# populate config entries by loading plugin
dump = loader.get(name, class_only=True)
except Exception as e:
display.warning('Skipping "%s" %s plugin, as we cannot load plugin to check config due to : %s' % (name, ptype, to_native(e)))
continue
# actually get the values
for setting in config_entries[finalname].keys():
try:
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, plugin_type=ptype, plugin_name=name, variables=get_constants())
except AnsibleRequiredOptionError:
v = None
o = 'REQUIRED'
if v is None and o is None:
# not all cases will be an error
o = 'REQUIRED'
config_entries[finalname][setting] = Setting(setting, v, o, None)
# pretty please!
results = self._render_settings(config_entries[finalname])
if results:
if context.CLIARGS['format'] == 'display':
# avoid header for empty lists (only changed!)
output.append('\n%s:\n%s' % (finalname, '_' * len(finalname)))
output.extend(results)
else:
output.append({finalname: results})
return output
def _get_galaxy_server_configs(self):
output = []
# add galaxy servers
for server in self._galaxy_servers:
server_config = {}
s_config = self.config.get_configuration_definitions('galaxy_server', server)
for setting in s_config.keys():
try:
v, o = C.config.get_config_value_and_origin(setting, plugin_type='galaxy_server', plugin_name=server, cfile=self.config_file)
except AnsibleError as e:
if s_config[setting].get('required', False):
v = None
o = 'REQUIRED'
else:
raise e
if v is None and o is None:
# not all cases will be an error
o = 'REQUIRED'
server_config[setting] = Setting(setting, v, o, None)
if context.CLIARGS['format'] == 'display':
if not context.CLIARGS['only_changed'] or server_config:
equals = '=' * len(server)
output.append(f'\n{server}\n{equals}')
output.extend(self._render_settings(server_config))
else:
output.append({server: server_config})
return output
def execute_dump(self):
"""
Shows the current settings, merges ansible.cfg if specified
"""
output = []
if context.CLIARGS['type'] in ('base', 'all'):
# deal with base
output = self._get_global_configs()
# add galaxy servers
server_config_list = self._get_galaxy_server_configs()
if context.CLIARGS['format'] == 'display':
output.append('\nGALAXY_SERVERS:\n')
output.extend(server_config_list)
else:
configs = {}
for server_config in server_config_list:
server = list(server_config.keys())[0]
server_reduced_config = server_config.pop(server)
configs[server] = server_reduced_config
output.append({'GALAXY_SERVERS': configs})
if context.CLIARGS['type'] == 'all':
# add all plugins
for ptype in C.CONFIGURABLE_PLUGINS:
plugin_list = self._get_plugin_configs(ptype, context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
if not context.CLIARGS['only_changed'] or plugin_list:
output.append('\n%s:\n%s' % (ptype.upper(), '=' * len(ptype)))
output.extend(plugin_list)
else:
if ptype in ('modules', 'doc_fragments'):
pname = ptype.upper()
else:
pname = '%s_PLUGINS' % ptype.upper()
output.append({pname: plugin_list})
elif context.CLIARGS['type'] != 'base':
# deal with specific plugin
output = self._get_plugin_configs(context.CLIARGS['type'], context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
text = '\n'.join(output)
if context.CLIARGS['format'] == 'yaml':
text = yaml_dump(output)
elif context.CLIARGS['format'] == 'json':
text = json_dump(output)
self.pager(to_text(text, errors='surrogate_or_strict'))
def execute_validate(self):
found = False
config_entries = self._list_entries_from_args()
plugin_types = config_entries.pop('PLUGINS', None)
galaxy_servers = config_entries.pop('GALAXY_SERVERS', None)
if context.CLIARGS['format'] == 'ini':
if C.CONFIG_FILE is not None:
# validate ini config since it is found
sections = _get_ini_entries(config_entries)
# Also from plugins
if plugin_types:
for ptype in plugin_types:
for plugin in plugin_types[ptype].keys():
plugin_sections = _get_ini_entries(plugin_types[ptype][plugin])
for s in plugin_sections:
if s in sections:
sections[s].update(plugin_sections[s])
else:
sections[s] = plugin_sections[s]
if galaxy_servers:
for server in galaxy_servers:
server_sections = _get_ini_entries(galaxy_servers[server])
for s in server_sections:
if s in sections:
sections[s].update(server_sections[s])
else:
sections[s] = server_sections[s]
if sections:
p = C.config._parsers[C.CONFIG_FILE]
for s in p.sections():
# check for valid sections
if s not in sections:
display.error(f"Found unknown section '{s}' in '{C.CONFIG_FILE}.")
found = True
continue
# check keys in valid sections
for k in p.options(s):
if k not in sections[s]:
display.error(f"Found unknown key '{k}' in section '{s}' in '{C.CONFIG_FILE}.")
found = True
elif context.CLIARGS['format'] == 'env':
# validate any 'ANSIBLE_' env vars found
evars = [varname for varname in os.environ.keys() if _ansible_env_vars(varname)]
if evars:
data = _get_evar_list(config_entries)
if plugin_types:
for ptype in plugin_types:
for plugin in plugin_types[ptype].keys():
data.extend(_get_evar_list(plugin_types[ptype][plugin]))
for evar in evars:
if evar not in data:
display.error(f"Found unknown environment variable '{evar}'.")
found = True
# we found discrepancies!
if found:
sys.exit(1)
# all good
display.display("All configurations seem valid!")
def main(args=None):
ConfigCLI.cli_executor(args)
if __name__ == '__main__':
main()
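# Typical invocations (illustrative):
#   ansible-config list -t shell
#   ansible-config dump --only-changed
#   ansible-config init --disabled -t all > ansible.cfg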
| 29,667 | Python | .py | 588 | 35.734694 | 160 | 0.544415 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,315 | adhoc.py | ansible_ansible/lib/ansible/cli/adhoc.py |
#!/usr/bin/env python
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils.common.text.converters import to_text
from ansible.parsing.splitter import parse_kv
from ansible.parsing.utils.yaml import from_yaml
from ansible.playbook import Playbook
from ansible.playbook.play import Play
from ansible.utils.display import Display
display = Display()
class AdHocCLI(CLI):
""" is an extra-simple tool/framework/API for doing 'remote things'.
this command allows you to define and run a single task 'playbook' against a set of hosts
"""
name = 'ansible'
def init_parser(self):
""" create an options parser for bin/ansible """
super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
desc="Define and run a single task 'playbook' against a set of hosts",
epilog="Some actions do not make sense in Ad-Hoc (include, meta, etc)")
opt_help.add_runas_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_async_options(self.parser)
opt_help.add_output_options(self.parser)
opt_help.add_connect_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
opt_help.add_tasknoplay_options(self.parser)
# options unique to ansible ad-hoc
self.parser.add_argument('-a', '--args', dest='module_args',
help="The action's options in space separated k=v format: -a 'opt1=val1 opt2=val2' "
"or a json string: -a '{\"opt1\": \"val1\", \"opt2\": \"val2\"}'",
default=C.DEFAULT_MODULE_ARGS)
self.parser.add_argument('-m', '--module-name', dest='module_name',
help="Name of the action to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
self.parser.add_argument('args', metavar='pattern', help='host pattern')
def post_process_args(self, options):
"""Post process and validate options for bin/ansible """
options = super(AdHocCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, fork_opts=True)
return options
def _play_ds(self, pattern, async_val, poll):
check_raw = context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS
module_args_raw = context.CLIARGS['module_args']
module_args = None
if module_args_raw and module_args_raw.startswith('{') and module_args_raw.endswith('}'):
try:
module_args = from_yaml(module_args_raw.strip(), json_only=True)
except AnsibleParserError:
pass
if not module_args:
module_args = parse_kv(module_args_raw, check_raw=check_raw)
mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': module_args},
'timeout': context.CLIARGS['task_timeout']}
# avoid adding to tasks that don't support it, unless set, then give user an error
if context.CLIARGS['module_name'] not in C._ACTION_ALL_INCLUDE_ROLE_TASKS and any(frozenset((async_val, poll))):
mytask['async_val'] = async_val
mytask['poll'] = poll
return dict(
name="Ansible Ad-Hoc",
hosts=pattern,
gather_facts='no',
tasks=[mytask])
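# For `ansible localhost -m ping` the returned datastructure is roughly
# (illustrative):
#   {'name': 'Ansible Ad-Hoc', 'hosts': 'localhost', 'gather_facts': 'no',
#    'tasks': [{'action': {'module': 'ping', 'args': {}}, 'timeout': 0}]}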
def run(self):
""" create and execute the single task playbook """
super(AdHocCLI, self).run()
# only thing left should be host pattern
pattern = to_text(context.CLIARGS['args'], errors='surrogate_or_strict')
# handle password prompts
sshpass = None
becomepass = None
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
# get basic objects
loader, inventory, variable_manager = self._play_prereqs()
# get list of hosts to execute against
try:
hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
except AnsibleError:
if context.CLIARGS['subset']:
raise
else:
hosts = []
display.warning("No hosts matched, nothing to do")
# just listing hosts?
if context.CLIARGS['listhosts']:
display.display(' hosts (%d):' % len(hosts))
for host in hosts:
display.display(' %s' % host)
return 0
# verify we have arguments if we know we need em
if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
err = "No argument passed to %s module" % context.CLIARGS['module_name']
if pattern.endswith(".yml"):
err = err + ' (did you mean to run ansible-playbook?)'
raise AnsibleOptionsError(err)
# Avoid modules that don't work with ad-hoc
if context.CLIARGS['module_name'] in C._ACTION_IMPORT_PLAYBOOK:
raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
% context.CLIARGS['module_name'])
# construct playbook objects to wrap task
play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
# used in start callback
playbook = Playbook(loader)
playbook._entries.append(play)
playbook._file_name = '__adhoc_playbook__'
if self.callback:
cb = self.callback
elif context.CLIARGS['one_line']:
cb = 'oneline'
# Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
cb = C.DEFAULT_STDOUT_CALLBACK
else:
cb = 'minimal'
run_tree = False
if context.CLIARGS['tree']:
C.CALLBACKS_ENABLED.append('tree')
C.TREE_DIR = context.CLIARGS['tree']
run_tree = True
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=run_tree,
forks=context.CLIARGS['forks'],
)
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', playbook)
result = self._tqm.run(play)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
finally:
if self._tqm:
self._tqm.cleanup()
if loader:
loader.cleanup_all_tmp_files()
return result
def main(args=None):
AdHocCLI.cli_executor(args)
if __name__ == '__main__':
main()
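# Typical invocation (illustrative):
#   ansible webservers -m ansible.builtin.command -a 'uptime' -f 10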
| 8,194 | Python | .py | 164 | 38.939024 | 120 | 0.617301 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,316 | pull.py | ansible_ansible/lib/ansible/cli/pull.py |
#!/usr/bin/env python
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import datetime
import os
import platform
import secrets
import shlex
import shutil
import socket
import sys
import time
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.plugins.loader import module_loader
from ansible.utils.cmd_functions import run_cmd
from ansible.utils.display import Display
display = Display()
class PullCLI(CLI):
""" Used to pull a remote copy of ansible on each managed node,
each set to run via cron and update playbook source via a source repository.
This inverts the default *push* architecture of ansible into a *pull* architecture,
which has near-limitless scaling potential.
None of the CLI tools are designed to run concurrently with themselves,
you should use an external scheduler and/or locking to ensure there are no clashing operations.
The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull.
This is useful both for extreme scale-out as well as periodic remediation.
Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an
excellent way to gather and analyze remote logs from ansible-pull.
"""
name = 'ansible-pull'
DEFAULT_REPO_TYPE = 'git'
DEFAULT_PLAYBOOK = 'local.yml'
REPO_CHOICES = ('git', 'subversion', 'hg', 'bzr')
PLAYBOOK_ERRORS = {
1: 'File does not exist',
2: 'File is not readable',
}
ARGUMENTS = {'playbook.yml': 'The name of one of the YAML format files to run as an Ansible playbook. '
'This can be a relative path within the checkout. By default, Ansible will '
"look for a playbook based on the host's fully-qualified domain name, "
'then on the short hostname, and finally a playbook named *local.yml*.', }
SKIP_INVENTORY_DEFAULTS = True
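# Typical cron-driven invocation (illustrative; URL is a placeholder):
#   ansible-pull -U https://git.example.com/ops/config.git -C production local.yml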
@staticmethod
def _get_inv_cli():
inv_opts = ''
if context.CLIARGS.get('inventory', False):
for inv in context.CLIARGS['inventory']:
if isinstance(inv, list):
inv_opts += " -i '%s' " % ','.join(inv)
elif ',' in inv or os.path.exists(inv):
inv_opts += ' -i %s ' % inv
return inv_opts
def init_parser(self):
""" create an options parser for bin/ansible """
super(PullCLI, self).init_parser(
usage='%prog -U <repository> [options] [<playbook.yml>]',
desc="pulls playbooks from a VCS repo and executes them on target host")
# Do not add check_options as there's a conflict with --checkout/-C
opt_help.add_connect_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_subset_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_runas_prompt_options(self.parser)
self.parser.add_argument('args', help='Playbook(s)', metavar='playbook.yml', nargs='*')
# options unique to pull
self.parser.add_argument('--purge', default=False, action='store_true', help='purge checkout after playbook run')
self.parser.add_argument('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
help='only run the playbook if the repository has been updated')
self.parser.add_argument('-s', '--sleep', dest='sleep', default=None,
help='sleep for random interval (between 0 and n number of seconds) before starting. '
'This is a useful way to disperse git requests')
self.parser.add_argument('-f', '--force', dest='force', default=False, action='store_true',
help='run the playbook even if the repository could not be updated')
self.parser.add_argument('-d', '--directory', dest='dest', default=None, type=opt_help.unfrack_path(),
help='path to the directory to which Ansible will checkout the repository.')
self.parser.add_argument('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
self.parser.add_argument('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.')
self.parser.add_argument('-C', '--checkout', dest='checkout',
help='branch/tag/commit to checkout. Defaults to behavior of repository module.')
self.parser.add_argument('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
help='adds the hostkey for the repo url if not already added')
self.parser.add_argument('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
help='Repository module name, which ansible will use to check out the repo. Choices are %s. Default is %s.'
% (self.REPO_CHOICES, self.DEFAULT_REPO_TYPE))
self.parser.add_argument('--verify-commit', dest='verify', default=False, action='store_true',
help='verify GPG signature of checked out commit, if it fails abort running the playbook. '
'This needs the corresponding VCS module to support such an operation')
self.parser.add_argument('--clean', dest='clean', default=False, action='store_true',
help='modified files in the working repository will be discarded')
self.parser.add_argument('--track-subs', dest='tracksubs', default=False, action='store_true',
help='submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update')
# add a subset of the check_opts flag group manually, as the full set's
# shortcodes conflict with above --checkout/-C
self.parser.add_argument("--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
self.parser.add_argument("--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check")
def post_process_args(self, options):
options = super(PullCLI, self).post_process_args(options)
if not options.dest:
hostname = socket.getfqdn()
# use a hostname dependent directory, in case of $HOME on nfs
options.dest = os.path.join(C.ANSIBLE_HOME, 'pull', hostname)
if os.path.exists(options.dest) and not os.path.isdir(options.dest):
raise AnsibleOptionsError("%s is not a valid or accessible directory." % options.dest)
if options.sleep:
try:
secs = secrets.randbelow(int(options.sleep))
options.sleep = secs
except ValueError:
raise AnsibleOptionsError("%s is not a number." % options.sleep)
if not options.url:
raise AnsibleOptionsError("URL for repository not specified, use -h for help")
if options.module_name not in self.REPO_CHOICES:
raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (options.module_name, ','.join(self.REPO_CHOICES)))
display.verbosity = options.verbosity
self.validate_conflicts(options)
return options
def run(self):
""" use Runner lib to do SSH things """
super(PullCLI, self).run()
# log command line
now = datetime.datetime.now()
display.display(now.strftime("Starting Ansible Pull at %F %T"))
display.display(' '.join(sys.argv))
# Build Checkout command
# Now construct the ansible command
node = platform.node()
host = socket.getfqdn()
hostnames = ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
if hostnames:
limit_opts = 'localhost,%s,127.0.0.1' % hostnames
else:
limit_opts = 'localhost,127.0.0.1'
base_opts = '-c local '
if context.CLIARGS['verbosity'] > 0:
base_opts += ' -%s' % ''.join(["v" for x in range(0, context.CLIARGS['verbosity'])])
# Attempt to use the inventory passed in as an argument
# It might not yet have been downloaded so use localhost as default
inv_opts = self._get_inv_cli()
if not inv_opts:
inv_opts = " -i localhost, "
# avoid interpreter discovery since we already know which interpreter to use on localhost
inv_opts += '-e %s ' % shlex.quote('ansible_python_interpreter=%s' % sys.executable)
# SCM specific options
if context.CLIARGS['module_name'] == 'git':
repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
if context.CLIARGS['checkout']:
repo_opts += ' version=%s' % context.CLIARGS['checkout']
if context.CLIARGS['accept_host_key']:
repo_opts += ' accept_hostkey=yes'
if context.CLIARGS['private_key_file']:
repo_opts += ' key_file=%s' % context.CLIARGS['private_key_file']
if context.CLIARGS['verify']:
repo_opts += ' verify_commit=yes'
if context.CLIARGS['tracksubs']:
repo_opts += ' track_submodules=yes'
if not context.CLIARGS['fullclone']:
repo_opts += ' depth=1'
elif context.CLIARGS['module_name'] == 'subversion':
repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
if context.CLIARGS['checkout']:
repo_opts += ' revision=%s' % context.CLIARGS['checkout']
if not context.CLIARGS['fullclone']:
repo_opts += ' export=yes'
elif context.CLIARGS['module_name'] == 'hg':
repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
if context.CLIARGS['checkout']:
repo_opts += ' revision=%s' % context.CLIARGS['checkout']
elif context.CLIARGS['module_name'] == 'bzr':
repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
if context.CLIARGS['checkout']:
repo_opts += ' version=%s' % context.CLIARGS['checkout']
else:
raise AnsibleOptionsError('Unsupported (%s) SCM module for pull, choices are: %s'
% (context.CLIARGS['module_name'],
','.join(self.REPO_CHOICES)))
# options common to all supported SCMS
if context.CLIARGS['clean']:
repo_opts += ' force=yes'
path = module_loader.find_plugin(context.CLIARGS['module_name'])
if path is None:
raise AnsibleOptionsError(("module '%s' not found.\n" % context.CLIARGS['module_name']))
bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
# hardcode local and inventory/host as this is just meant to fetch the repo
cmd = '%s/ansible %s %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts,
context.CLIARGS['module_name'],
repo_opts, limit_opts)
for ev in context.CLIARGS['extra_vars']:
cmd += ' -e %s' % shlex.quote(ev)
# Nap?
if context.CLIARGS['sleep']:
display.display("Sleeping for %d seconds..." % context.CLIARGS['sleep'])
time.sleep(context.CLIARGS['sleep'])
# RUN the Checkout command
display.debug("running ansible with VCS module to checkout repo")
display.vvvv('EXEC: %s' % cmd)
rc, b_out, b_err = run_cmd(cmd, live=True)
if rc != 0:
if context.CLIARGS['force']:
display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
else:
return rc
elif context.CLIARGS['ifchanged'] and b'"changed": true' not in b_out:
display.display("Repository has not changed, quitting.")
return 0
playbook = self.select_playbook(context.CLIARGS['dest'])
if playbook is None:
raise AnsibleOptionsError("Could not find a playbook to run.")
# Build playbook command
cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
if context.CLIARGS['vault_password_files']:
for vault_password_file in context.CLIARGS['vault_password_files']:
cmd += " --vault-password-file=%s" % vault_password_file
if context.CLIARGS['vault_ids']:
for vault_id in context.CLIARGS['vault_ids']:
cmd += " --vault-id=%s" % vault_id
if context.CLIARGS['become_password_file']:
cmd += " --become-password-file=%s" % context.CLIARGS['become_password_file']
if context.CLIARGS['connection_password_file']:
cmd += " --connection-password-file=%s" % context.CLIARGS['connection_password_file']
for ev in context.CLIARGS['extra_vars']:
cmd += ' -e %s' % shlex.quote(ev)
if context.CLIARGS['become_ask_pass']:
cmd += ' --ask-become-pass'
if context.CLIARGS['skip_tags']:
cmd += ' --skip-tags "%s"' % to_native(u','.join(context.CLIARGS['skip_tags']))
if context.CLIARGS['tags']:
cmd += ' -t "%s"' % to_native(u','.join(context.CLIARGS['tags']))
if context.CLIARGS['subset']:
cmd += ' -l "%s"' % context.CLIARGS['subset']
else:
cmd += ' -l "%s"' % limit_opts
if context.CLIARGS['check']:
cmd += ' -C'
if context.CLIARGS['diff']:
cmd += ' -D'
os.chdir(context.CLIARGS['dest'])
# redo inventory options as new files might exist now
inv_opts = self._get_inv_cli()
if inv_opts:
cmd += inv_opts
# RUN THE PLAYBOOK COMMAND
display.debug("running ansible-playbook to do actual work")
display.debug('EXEC: %s' % cmd)
rc, b_out, b_err = run_cmd(cmd, live=True)
if context.CLIARGS['purge']:
os.chdir('/')
try:
display.debug("removing: %s" % context.CLIARGS['dest'])
shutil.rmtree(context.CLIARGS['dest'])
except Exception as e:
display.error(u"Failed to remove %s: %s" % (context.CLIARGS['dest'], to_text(e)))
return rc
@staticmethod
def try_playbook(path):
if not os.path.exists(path):
return 1
if not os.access(path, os.R_OK):
return 2
return 0
@staticmethod
def select_playbook(path):
playbook = None
errors = []
if context.CLIARGS['args'] and context.CLIARGS['args'][0] is not None:
playbooks = []
for book in context.CLIARGS['args']:
book_path = os.path.join(path, book)
rc = PullCLI.try_playbook(book_path)
if rc != 0:
errors.append("%s: %s" % (book_path, PullCLI.PLAYBOOK_ERRORS[rc]))
continue
playbooks.append(book_path)
if 0 < len(errors):
display.warning("\n".join(errors))
elif len(playbooks) == len(context.CLIARGS['args']):
playbook = " ".join(playbooks)
return playbook
else:
fqdn = socket.getfqdn()
hostpb = os.path.join(path, fqdn + '.yml')
shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
localpb = os.path.join(path, PullCLI.DEFAULT_PLAYBOOK)
for pb in [hostpb, shorthostpb, localpb]:
rc = PullCLI.try_playbook(pb)
if rc == 0:
playbook = pb
break
else:
errors.append("%s: %s" % (pb, PullCLI.PLAYBOOK_ERRORS[rc]))
if playbook is None:
display.warning("\n".join(errors))
return playbook
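# For a host named 'web01.example.com' and no playbook argument, the search
# order above is (illustrative):
#   web01.example.com.yml -> web01.yml -> local.yml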
def main(args=None):
PullCLI.cli_executor(args)
if __name__ == '__main__':
main()
| 17,287 | Python | .py | 309 | 43.676375 | 157 | 0.593817 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,317 | vault.py | ansible_ansible/lib/ansible/cli/vault.py |
#!/usr/bin/env python
# (c) 2014, James Tanner <tanner.jc@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import os
import sys
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret
from ansible.utils.display import Display
display = Display()
class VaultCLI(CLI):
""" can encrypt any structured data file used by Ansible.
This can include *group_vars/* or *host_vars/* inventory variables,
variables loaded by *include_vars* or *vars_files*, or variable files
passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
Role variables and defaults are also included!
Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.
"""
name = 'ansible-vault'
FROM_STDIN = "stdin"
FROM_ARGS = "the command line args"
FROM_PROMPT = "the interactive prompt"
def __init__(self, args):
self.b_vault_pass = None
self.b_new_vault_pass = None
self.encrypt_string_read_stdin = False
self.encrypt_secret = None
self.encrypt_vault_id = None
self.new_encrypt_secret = None
self.new_encrypt_vault_id = None
super(VaultCLI, self).__init__(args)
def init_parser(self):
super(VaultCLI, self).init_parser(
desc="encryption/decryption utility for Ansible data files",
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
common = opt_help.ArgumentParser(add_help=False)
opt_help.add_vault_options(common)
opt_help.add_verbosity_options(common)
subparsers = self.parser.add_subparsers(dest='action')
subparsers.required = True
output = opt_help.ArgumentParser(add_help=False)
output.add_argument('--output', default=None, dest='output_file',
help='output file name for encrypt or decrypt; use - for stdout',
type=opt_help.unfrack_path())
# For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
vault_id = opt_help.ArgumentParser(add_help=False)
vault_id.add_argument('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
action='store', type=str,
help='the vault id used to encrypt (required if more than one vault-id is provided)')
create_parser = subparsers.add_parser('create', help='Create new vault encrypted file', parents=[vault_id, common])
create_parser.set_defaults(func=self.execute_create)
create_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
create_parser.add_argument('--skip-tty-check', default=False, help='allows editor to be opened when no tty attached',
dest='skip_tty_check', action='store_true')
decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file', parents=[output, common])
decrypt_parser.set_defaults(func=self.execute_decrypt)
decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
edit_parser = subparsers.add_parser('edit', help='Edit vault encrypted file', parents=[vault_id, common])
edit_parser.set_defaults(func=self.execute_edit)
edit_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
view_parser = subparsers.add_parser('view', help='View vault encrypted file', parents=[common])
view_parser.set_defaults(func=self.execute_view)
view_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt YAML file', parents=[common, output, vault_id])
encrypt_parser.set_defaults(func=self.execute_encrypt)
encrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
enc_str_parser = subparsers.add_parser('encrypt_string', help='Encrypt a string', parents=[common, output, vault_id])
enc_str_parser.set_defaults(func=self.execute_encrypt_string)
enc_str_parser.add_argument('args', help='String to encrypt', metavar='string_to_encrypt', nargs='*')
enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
action='store_true',
help="Prompt for the string to encrypt")
enc_str_parser.add_argument('--show-input', dest='show_string_input', default=False, action='store_true',
help='Do not hide input when prompted for the string to encrypt')
enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
action='append',
help="Specify the variable name")
enc_str_parser.add_argument('--stdin-name', dest='encrypt_string_stdin_name',
default=None,
help="Specify the variable name for stdin")
rekey_parser = subparsers.add_parser('rekey', help='Re-key a vault encrypted file', parents=[common, vault_id])
rekey_parser.set_defaults(func=self.execute_rekey)
rekey_new_group = rekey_parser.add_mutually_exclusive_group()
rekey_new_group.add_argument('--new-vault-password-file', default=None, dest='new_vault_password_file',
help="new vault password file for rekey", type=opt_help.unfrack_path())
rekey_new_group.add_argument('--new-vault-id', default=None, dest='new_vault_id', type=str,
help='the new vault identity to use for rekey')
rekey_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
def post_process_args(self, options):
options = super(VaultCLI, self).post_process_args(options)
display.verbosity = options.verbosity
if options.vault_ids:
for vault_id in options.vault_ids:
if u';' in vault_id:
raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)
if getattr(options, 'output_file', None) and len(options.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
if options.action == 'encrypt_string':
if '-' in options.args or not options.args or options.encrypt_string_stdin_name:
self.encrypt_string_read_stdin = True
# TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
return options
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
# set default restrictive umask
old_umask = os.umask(0o077)
vault_ids = list(context.CLIARGS['vault_ids'])
# there are 3 types of actions, those that just 'read' (decrypt, view) and only
# need to ask for a password once, and those that 'write' (create, encrypt) that
# ask for a new password and confirm it, and 'read/write (rekey) that asks for the
# old password, then asks for a new one and confirms it.
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
action = context.CLIARGS['action']
# TODO: instead of prompting for these before, we could let VaultEditor
# call a callback when it needs it.
if action in ['decrypt', 'view', 'rekey', 'edit']:
vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'])
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
if action in ['encrypt', 'encrypt_string', 'create']:
encrypt_vault_id = None
# no --encrypt-vault-id context.CLIARGS['encrypt_vault_id'] for 'edit'
if action not in ['edit']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
vault_secrets = None
vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
create_new_password=True)
if len(vault_secrets) > 1 and not encrypt_vault_id:
raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
','.join([x[0] for x in vault_secrets]))
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
encrypt_secret = match_encrypt_secret(vault_secrets,
encrypt_vault_id=encrypt_vault_id)
# only one secret for encrypt for now, use the first vault_id and use its first secret
# TODO: exception if more than one?
self.encrypt_vault_id = encrypt_secret[0]
self.encrypt_secret = encrypt_secret[1]
if action in ['rekey']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
# print('encrypt_vault_id: %s' % encrypt_vault_id)
# print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)
# new_vault_ids should only ever be one item;
# load the default vault ids if we are using encrypt-vault-id
new_vault_ids = []
if encrypt_vault_id:
new_vault_ids = default_vault_ids
if context.CLIARGS['new_vault_id']:
new_vault_ids.append(context.CLIARGS['new_vault_id'])
new_vault_password_files = []
if context.CLIARGS['new_vault_password_file']:
new_vault_password_files.append(context.CLIARGS['new_vault_password_file'])
new_vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=new_vault_ids,
vault_password_files=new_vault_password_files,
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
create_new_password=True)
if not new_vault_secrets:
raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")
# There is only one new_vault_id currently and one new_vault_secret, or we
# use the id specified in --encrypt-vault-id
new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
encrypt_vault_id=encrypt_vault_id)
self.new_encrypt_vault_id = new_encrypt_secret[0]
self.new_encrypt_secret = new_encrypt_secret[1]
loader.set_vault_secrets(vault_secrets)
# FIXME: do we need to create VaultEditor here? it's not reused
vault = VaultLib(vault_secrets)
self.editor = VaultEditor(vault)
context.CLIARGS['func']()
# and restore umask
os.umask(old_umask)
def execute_encrypt(self):
""" encrypt the supplied file using the provided vault secret """
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading plaintext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
# FIXME: use the correct vault secret/id for each file
self.editor.encrypt_file(f, self.encrypt_secret,
vault_id=self.encrypt_vault_id,
output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
@staticmethod
def format_ciphertext_yaml(b_ciphertext, indent=None, name=None):
indent = indent or 10
block_format_var_name = ""
if name:
block_format_var_name = "%s: " % name
block_format_header = "%s!vault |" % block_format_var_name
lines = []
vault_ciphertext = to_text(b_ciphertext)
lines.append(block_format_header)
for line in vault_ciphertext.splitlines():
lines.append('%s%s' % (' ' * indent, line))
yaml_ciphertext = '\n'.join(lines)
return yaml_ciphertext
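# Illustrative sketch of the YAML block this helper returns (the ciphertext
# below is a made-up placeholder, not real vault output); with the default
# indent of 10:
#
#   >>> print(VaultCLI.format_ciphertext_yaml(b'$ANSIBLE_VAULT;1.1;AES256\n6134...', name='db_password'))
#   db_password: !vault |
#             $ANSIBLE_VAULT;1.1;AES256
#             6134...
#
# Without a name, only the bare '!vault |' header precedes the indented lines.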
def execute_encrypt_string(self):
""" encrypt the supplied string using the provided vault secret """
b_plaintext = None
# Holds tuples (the_text, the_source_of_the_string, the variable name if it's provided).
b_plaintext_list = []
# remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
# we don't add it to the plaintext list
args = [x for x in context.CLIARGS['args'] if x != '-']
# We can prompt and read input, or read from stdin, but not both.
if context.CLIARGS['encrypt_string_prompt']:
msg = "String to encrypt: "
name = None
name_prompt_response = display.prompt('Variable name (enter for no name): ')
# TODO: enforce var naming rules?
if name_prompt_response != "":
name = name_prompt_response
# TODO: could prompt for which vault_id to use for each plaintext string
# currently, it will just be the default
hide_input = not context.CLIARGS['show_string_input']
if hide_input:
msg = "String to encrypt (hidden): "
else:
msg = "String to encrypt:"
prompt_response = display.prompt(msg, private=hide_input)
if prompt_response == '':
raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
b_plaintext = to_bytes(prompt_response)
b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
# read from stdin
if self.encrypt_string_read_stdin:
if sys.stdout.isatty():
display.display("Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a newline)", stderr=True)
stdin_text = sys.stdin.read()
if stdin_text == '':
raise AnsibleOptionsError('stdin was empty, not encrypting')
if sys.stdout.isatty() and not stdin_text.endswith("\n"):
display.display("\n")
b_plaintext = to_bytes(stdin_text)
# defaults to None
name = context.CLIARGS['encrypt_string_stdin_name']
b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
# use any leftover args as strings to encrypt
# Try to match args up to --name options
if context.CLIARGS.get('encrypt_string_names', False):
name_and_text_list = list(zip(context.CLIARGS['encrypt_string_names'], args))
# Some, but not enough, --name options to name each var
if len(args) > len(name_and_text_list):
# Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that.
display.display('The number of --name options does not match the number of args.',
stderr=True)
display.display('The last named variable will be "%s". The rest will not have'
' names.' % context.CLIARGS['encrypt_string_names'][-1],
stderr=True)
# Add the rest of the args without specifying a name
for extra_arg in args[len(name_and_text_list):]:
name_and_text_list.append((None, extra_arg))
# if no --names are provided, just use the args without a name.
else:
name_and_text_list = [(None, x) for x in args]
# Convert the plaintext text objects to bytestrings and collect
for name_and_text in name_and_text_list:
name, plaintext = name_and_text
if plaintext == '':
raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
b_plaintext = to_bytes(plaintext)
b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
# TODO: specify vault_id per string?
# Format the encrypted strings and any corresponding stderr output
outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)
b_outs = []
for output in outputs:
err = output.get('err', None)
out = output.get('out', '')
if err:
sys.stderr.write(err)
b_outs.append(to_bytes(out))
# The output must end with a newline to play nice with terminal representation.
# Refs:
# * https://stackoverflow.com/a/729795/595220
# * https://github.com/ansible/ansible/issues/78932
b_outs.append(b'')
self.editor.write_data(b'\n'.join(b_outs), context.CLIARGS['output_file'] or '-')
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
# TODO: offer block or string ala eyaml
def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
# If we are only showing one item in the output, we don't need to include
# comment delimiters in the text
show_delimiter = False
if len(b_plaintext_list) > 1:
show_delimiter = True
# list of dicts {'out': '', 'err': ''}
output = []
# Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
# For more than one input, show some differentiating info in the stderr output so we can tell them
# apart. If we have a var name, we include that in the yaml
for index, b_plaintext_info in enumerate(b_plaintext_list):
# (the text itself, which input it came from, its name)
b_plaintext, src, name = b_plaintext_info
b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret, vault_id=vault_id)
# block formatting
yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
err_msg = None
if show_delimiter:
human_index = index + 1
if name:
err_msg = '# The encrypted version of the variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
else:
err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
output.append({'out': yaml_text, 'err': err_msg})
return output
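# Illustrative shape of the return value for two named inputs (ciphertext and
# the source labels elided); 'out' is the pasteable YAML block and 'err' is
# commentary destined only for stderr (it stays None for a single input):
#
#   [{'out': 'api_key: !vault |\n          $ANSIBLE_VAULT;...', 'err': '# The encrypted version of ...\n'},
#    {'out': 'db_pass: !vault |\n          $ANSIBLE_VAULT;...', 'err': '# The encrypted version of ...\n'}]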
def execute_decrypt(self):
""" decrypt the supplied file using the provided vault secret """
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading ciphertext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
self.editor.decrypt_file(f, output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Decryption successful", stderr=True)
def execute_create(self):
""" create and open a file in an editor that will be encrypted with the provided vault secret when closed"""
if len(context.CLIARGS['args']) != 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
if sys.stdout.isatty() or context.CLIARGS['skip_tty_check']:
self.editor.create_file(context.CLIARGS['args'][0], self.encrypt_secret,
vault_id=self.encrypt_vault_id)
else:
raise AnsibleOptionsError("not a tty, editor cannot be opened")
def execute_edit(self):
""" open and decrypt an existing vaulted file in an editor, that will be encrypted again when closed"""
for f in context.CLIARGS['args']:
self.editor.edit_file(f)
def execute_view(self):
""" open, decrypt and view an existing vaulted file using a pager using the supplied vault secret """
for f in context.CLIARGS['args']:
# Note: vault should return byte strings because it could encrypt
# and decrypt binary files. We are responsible for changing it to
# unicode here because we are displaying it and therefore can make
# the decision that the display doesn't have to be precisely what
# the input was (leave that to decrypt instead)
plaintext = self.editor.plaintext(f)
self.pager(to_text(plaintext))
def execute_rekey(self):
""" re-encrypt a vaulted file with a new secret, the previous secret is required """
for f in context.CLIARGS['args']:
# FIXME: plumb in vault_id, use the default new_vault_secret for now
self.editor.rekey_file(f, self.new_encrypt_secret,
self.new_encrypt_vault_id)
display.display("Rekey successful", stderr=True)
def main(args=None):
VaultCLI.cli_executor(args)
if __name__ == '__main__':
main()
| 22,954 | Python | .py | 372 | 48.545699 | 160 | 0.616203 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,318 | console.py | ansible_ansible/lib/ansible/cli/console.py |
#!/usr/bin/env python
# Copyright: (c) 2014, Nandor Sivok <dominis@haxor.hu>
# Copyright: (c) 2016, Redhat Inc
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import atexit
import cmd
import getpass
import readline
import os
import sys
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.plugins.list import list_plugins
from ansible.plugins.loader import module_loader, fragment_loader
from ansible.utils import plugin_docs
from ansible.utils.color import stringc
from ansible.utils.display import Display
display = Display()
class ConsoleCLI(CLI, cmd.Cmd):
"""
A REPL that allows for running ad-hoc tasks against a chosen inventory
from a nice shell with built-in tab completion (based on dominis'
``ansible-shell``).
It supports several commands, and you can modify its configuration at
runtime:
- ``cd [pattern]``: change host/group
(you can use host patterns eg.: ``app*.dc*:!app01*``)
- ``list``: list available hosts in the current path
- ``list groups``: list groups included in the current path
- ``become``: toggle the become flag
- ``!``: forces shell module instead of the ansible module
(``!yum update -y``)
- ``verbosity [num]``: set the verbosity level
- ``forks [num]``: set the number of forks
- ``become_user [user]``: set the become_user
- ``remote_user [user]``: set the remote_user
- ``become_method [method]``: set the privilege escalation method
- ``check [bool]``: toggle check mode
- ``diff [bool]``: toggle diff mode
- ``timeout [integer]``: set the timeout of tasks in seconds
(0 to disable)
- ``help [command/module]``: display documentation for
the command or module
- ``exit``: exit ``ansible-console``
"""
name = 'ansible-console'
modules = [] # type: list[str] | None
ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '
'selecting hosts in inventory or any combination of the two separated by commas.'}
# use specific to console, but fallback to highlight for backwards compatibility
NORMAL_PROMPT = C.COLOR_CONSOLE_PROMPT or C.COLOR_HIGHLIGHT
def __init__(self, args):
super(ConsoleCLI, self).__init__(args)
self.intro = 'Welcome to the ansible console. Type help or ? to list commands.\n'
self.groups = []
self.hosts = []
self.pattern = None
self.variable_manager = None
self.loader = None
self.passwords = dict()
self.cwd = '*'
# Defaults for these are set from the CLI in run()
self.remote_user = None
self.become = None
self.become_user = None
self.become_method = None
self.check_mode = None
self.diff = None
self.forks = None
self.task_timeout = None
self.collections = None
cmd.Cmd.__init__(self)
def init_parser(self):
super(ConsoleCLI, self).init_parser(
desc="REPL console for executing Ansible tasks.",
epilog="This is not a live session/connection: each task is executed in the background and returns its results."
)
opt_help.add_runas_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_connect_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_tasknoplay_options(self.parser)
# options unique to shell
self.parser.add_argument('pattern', help='host pattern', metavar='pattern', default='all', nargs='?')
self.parser.add_argument('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
def post_process_args(self, options):
options = super(ConsoleCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, fork_opts=True)
return options
def get_names(self):
return dir(self)
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
self.cmdloop()
except EOFError:
display.display("[Ansible-console was exited]")
self.do_exit(self)
def set_prompt(self):
login_user = self.remote_user or getpass.getuser()
self.selected = self.inventory.list_hosts(self.cwd)
prompt = "%s@%s (%d)[f:%s]" % (login_user, self.cwd, len(self.selected), self.forks)
if self.become and self.become_user in [None, 'root']:
prompt += "# "
color = C.COLOR_ERROR
else:
prompt += "$ "
color = self.NORMAL_PROMPT
self.prompt = stringc(prompt, color, wrap_nonvisible_chars=True)
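# For example (illustrative values): login user 'admin' targeting pattern
# 'web*' that matches 4 hosts with 5 forks yields the prompt
# 'admin@web* (4)[f:5]$ ', which switches to 'admin@web* (4)[f:5]# ' in the
# error color once become is enabled for root.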
def list_modules(self):
return list_plugins('module', self.collections)
def default(self, line, forceshell=False):
""" actually runs modules """
if line.startswith("#"):
return False
if not self.cwd:
display.error("No host found")
return False
# defaults
module = 'shell'
module_args = line
if forceshell is not True:
possible_module, *possible_args = line.split()
if module_loader.find_plugin(possible_module):
# we found module!
module = possible_module
if possible_args:
module_args = ' '.join(possible_args)
else:
module_args = ''
if self.callback:
cb = self.callback
elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
cb = C.DEFAULT_STDOUT_CALLBACK
else:
cb = 'minimal'
result = None
try:
check_raw = module in C._ACTION_ALLOWS_RAW_ARGS
task = dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)), timeout=self.task_timeout)
play_ds = dict(
name="Ansible Shell",
hosts=self.cwd,
gather_facts='no',
tasks=[task],
remote_user=self.remote_user,
become=self.become,
become_user=self.become_user,
become_method=self.become_method,
check_mode=self.check_mode,
diff=self.diff,
collections=self.collections,
)
play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
except Exception as e:
display.error(u"Unable to build command: %s" % to_text(e))
return False
try:
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
passwords=self.passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=False,
forks=self.forks,
)
result = self._tqm.run(play)
display.debug(result)
finally:
if self._tqm:
self._tqm.cleanup()
if self.loader:
self.loader.cleanup_all_tmp_files()
if result is None:
display.error("No hosts found")
return False
except KeyboardInterrupt:
display.error('User interrupted execution')
return False
except Exception as e:
if self.verbosity >= 3:
import traceback
display.v(traceback.format_exc())
display.error(to_text(e))
return False
def emptyline(self):
return
def do_shell(self, arg):
"""
You can run shell commands through the shell module.
eg.:
shell ps uax | grep java | wc -l
shell killall python
shell halt -n
You can use the ! to force the shell module. eg.:
!ps aux | grep java | wc -l
"""
self.default(arg, True)
def help_shell(self):
display.display("You can run shell commands through the shell module.")
def do_forks(self, arg):
"""Set the number of forks"""
if arg:
try:
forks = int(arg)
except ValueError:
display.error('Invalid argument for "forks"')
self.usage_forks()
return
if forks > 0:
self.forks = forks
self.set_prompt()
else:
display.display('forks must be greater than or equal to 1')
else:
self.usage_forks()
def help_forks(self):
display.display("Set the number of forks to use per task")
self.usage_forks()
def usage_forks(self):
display.display('Usage: forks <number>')
do_serial = do_forks
help_serial = help_forks
def do_collections(self, arg):
"""Set list of collections for 'short name' usage"""
if arg in ('', 'none'):
self.collections = None
elif not arg:
self.usage_collections()
else:
collections = arg.split(',')
for collection in collections:
if self.collections is None:
self.collections = []
self.collections.append(collection.strip())
if self.collections:
display.v('Collections name search is set to: %s' % ', '.join(self.collections))
else:
display.v('Collections name search is using defaults')
def help_collections(self):
display.display("Set the collection name search path when using short names for plugins")
self.usage_collections()
def usage_collections(self):
display.display('Usage: collections <collection1>[, <collection2> ...]\n Use empty quotes or "none" to reset to default.\n')
def do_verbosity(self, arg):
"""Set verbosity level"""
if not arg:
display.display('Usage: verbosity <number>')
else:
try:
display.verbosity = int(arg)
display.v('verbosity level set to %s' % arg)
except (TypeError, ValueError) as e:
display.error('The verbosity must be a valid integer: %s' % to_text(e))
def help_verbosity(self):
display.display("Set the verbosity level, equivalent to -v for 1 and -vvvv for 4.")
def do_cd(self, arg):
"""
Change active host/group. You can use hosts patterns as well eg.:
cd webservers
cd webservers:dbservers
cd webservers:!phoenix
cd webservers:&staging
cd webservers:dbservers:&staging:!phoenix
"""
if not arg:
self.cwd = '*'
elif arg in '/*':
self.cwd = 'all'
elif self.inventory.get_hosts(arg):
self.cwd = arg
else:
display.display("no host matched")
self.set_prompt()
def help_cd(self):
display.display("Change active host/group. ")
self.usage_cd()
def usage_cd(self):
display.display("Usage: cd <group>|<host>|<host pattern>")
def do_list(self, arg):
"""List the hosts in the current group"""
if not arg:
for host in self.selected:
display.display(host.name)
elif arg == 'groups':
for group in self.groups:
display.display(group)
else:
display.error('Invalid option passed to "list"')
self.help_list()
def help_list(self):
display.display("List the hosts in the current group or a list of groups if you add 'groups'.")
def do_become(self, arg):
"""Toggle whether plays run with become"""
if arg:
self.become = boolean(arg, strict=False)
display.v("become changed to %s" % self.become)
self.set_prompt()
else:
display.display("Please specify become value, e.g. `become yes`")
def help_become(self):
display.display("Toggle whether the tasks are run with become")
def do_remote_user(self, arg):
"""Given a username, set the remote user plays are run by"""
if arg:
self.remote_user = arg
self.set_prompt()
else:
display.display("Please specify a remote user, e.g. `remote_user root`")
def help_remote_user(self):
display.display("Set the user for use as login to the remote target")
def do_become_user(self, arg):
"""Given a username, set the user that plays are run by when using become"""
if arg:
self.become_user = arg
else:
display.display("Please specify a user, e.g. `become_user jenkins`")
display.v("Current user is %s" % self.become_user)
self.set_prompt()
def help_become_user(self):
display.display("Set the user for use with privilege escalation (which remote user attempts to 'become' when become is enabled)")
def do_become_method(self, arg):
"""Given a become_method, set the privilege escalation method when using become"""
if arg:
self.become_method = arg
display.v("become_method changed to %s" % self.become_method)
else:
display.display("Please specify a become_method, e.g. `become_method su`")
display.v("Current become_method is %s" % self.become_method)
def help_become_method(self):
display.display("Set the privilege escalation plugin to use when become is enabled")
def do_check(self, arg):
"""Toggle whether plays run with check mode"""
if arg:
self.check_mode = boolean(arg, strict=False)
display.display("check mode changed to %s" % self.check_mode)
else:
display.display("Please specify check mode value, e.g. `check yes`")
display.v("check mode is currently %s." % self.check_mode)
def help_check(self):
display.display("Toggle check_mode for the tasks")
def do_diff(self, arg):
"""Toggle whether plays run with diff"""
if arg:
self.diff = boolean(arg, strict=False)
display.display("diff mode changed to %s" % self.diff)
else:
display.display("Please specify a diff value , e.g. `diff yes`")
display.v("diff mode is currently %s" % self.diff)
def help_diff(self):
display.display("Toggle diff output for the tasks")
def do_timeout(self, arg):
"""Set the timeout"""
if arg:
try:
timeout = int(arg)
if timeout < 0:
display.error('The timeout must be a positive integer, or 0 to disable')
else:
self.task_timeout = timeout
except (TypeError, ValueError) as e:
display.error('The timeout must be a valid positive integer, or 0 to disable: %s' % to_text(e))
else:
self.usage_timeout()
def help_timeout(self):
display.display("Set task timeout in seconds")
self.usage_timeout()
def usage_timeout(self):
display.display('Usage: timeout <seconds>')
def do_exit(self, args):
"""Exits from the console"""
sys.stdout.write('\nAnsible-console was exited.\n')
return -1
def help_exit(self):
display.display("LEAVE!")
do_EOF = do_exit
help_EOF = help_exit
def helpdefault(self, module_name):
if module_name:
in_path = module_loader.find_plugin(module_name)
if in_path:
oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader)
if oc:
display.display(oc['short_description'])
display.display('Parameters:')
for opt in oc['options'].keys():
display.display(' ' + stringc(opt, self.NORMAL_PROMPT) + ' ' + oc['options'][opt]['description'][0])
else:
display.error('No documentation found for %s.' % module_name)
else:
display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)
def help_help(self):
display.warning("Don't be redundant!")
def complete_cd(self, text, line, begidx, endidx):
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
if self.cwd in ('all', '*', '\\'):
completions = self.hosts + self.groups
else:
completions = [x.name for x in self.inventory.list_hosts(self.cwd)]
return [to_native(s)[offs:] for s in completions if to_native(s).startswith(to_native(mline))]
def completedefault(self, text, line, begidx, endidx):
if line.split()[0] in self.list_modules():
mline = line.split(' ')[-1]
offs = len(mline) - len(text)
completions = self.module_args(line.split()[0])
return [s[offs:] + '=' for s in completions if s.startswith(mline)]
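# Illustrative completions (available option names depend on the module's
# docs): typing 'copy sr<TAB>' consults module_args('copy') and offers
# 'src=', while 'cd w<TAB>' (via complete_cd above) offers matching host and
# group names.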
def module_args(self, module_name):
in_path = module_loader.find_plugin(module_name)
oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader, is_module=True)
return list(oc['options'].keys())
def run(self):
super(ConsoleCLI, self).run()
sshpass = None
becomepass = None
# hosts
self.pattern = context.CLIARGS['pattern']
self.cwd = self.pattern
# Defaults from the command line
self.remote_user = context.CLIARGS['remote_user']
self.become = context.CLIARGS['become']
self.become_user = context.CLIARGS['become_user']
self.become_method = context.CLIARGS['become_method']
self.check_mode = context.CLIARGS['check']
self.diff = context.CLIARGS['diff']
self.forks = context.CLIARGS['forks']
self.task_timeout = context.CLIARGS['task_timeout']
# set module path if needed
if context.CLIARGS['module_path']:
for path in context.CLIARGS['module_path']:
if path:
module_loader.add_directory(path)
# dynamically add 'canonical' modules as commands, aliases could be used and dynamically loaded
self.modules = self.list_modules()
for module in self.modules:
setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module))
(sshpass, becomepass) = self.ask_passwords()
self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
self.loader, self.inventory, self.variable_manager = self._play_prereqs()
hosts = self.get_host_list(self.inventory, context.CLIARGS['subset'], self.pattern)
self.groups = self.inventory.list_groups()
self.hosts = [x.name for x in hosts]
# This hack is to work around readline issues on a mac:
# http://stackoverflow.com/a/7116997/541202
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(readline.write_history_file, histfile)
self.set_prompt()
self.cmdloop()
def __getattr__(self, name):
""" handle not found to populate dynamically a module function if module matching name exists """
attr = None
if name.startswith('do_'):
module = name.replace('do_', '')
if module_loader.find_plugin(module):
setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))
attr = object.__getattr__(self, name)
elif name.startswith('help_'):
module = name.replace('help_', '')
if module_loader.find_plugin(module):
setattr(self, name, lambda module=module: self.helpdefault(module))
attr = object.__getattr__(self, name)
if attr is None:
raise AttributeError(f"{self.__class__} does not have a {name} attribute")
return attr
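# Illustrative effect: the first access to e.g. 'do_ping' for a resolvable
# module synthesizes a handler on the fly, so 'ping data=hello' at the prompt
# runs the module even though no do_ping method was predefined; unresolvable
# names still raise AttributeError as usual.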
def main(args=None):
ConsoleCLI.cli_executor(args)
if __name__ == '__main__':
main()
| 21,985 | Python | .py | 500 | 33.488 | 137 | 0.596829 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,319 | galaxy.py | ansible_ansible/lib/ansible/cli/galaxy.py |
#!/usr/bin/env python
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import argparse
import functools
import json
import os.path
import pathlib
import re
import shutil
import sys
import textwrap
import time
import typing as t
from dataclasses import dataclass
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI, GalaxyError
from ansible.galaxy.collection import (
build_collection,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections,
SIGNATURE_COUNT_RE,
)
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.collection.gpg import GPG_ERROR_MAP
from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
from ansible.utils.vars import load_extra_vars
display = Display()
urlparse = six.moves.urllib.parse.urlparse
def with_collection_artifacts_manager(wrapped_method):
"""Inject an artifacts manager if not passed explicitly.
This decorator constructs a ConcreteArtifactsManager and maintains
the related temporary directory auto-cleanup around the target
method invocation.
"""
@functools.wraps(wrapped_method)
def method_wrapper(*args, **kwargs):
if 'artifacts_manager' in kwargs:
return wrapped_method(*args, **kwargs)
# FIXME: use validate_certs context from Galaxy servers when downloading collections
# .get used here for when this is used in a non-CLI context
artifacts_manager_kwargs = {'validate_certs': context.CLIARGS.get('resolved_validate_certs', True)}
keyring = context.CLIARGS.get('keyring', None)
if keyring is not None:
artifacts_manager_kwargs.update({
'keyring': GalaxyCLI._resolve_path(keyring),
'required_signature_count': context.CLIARGS.get('required_valid_signature_count', None),
'ignore_signature_errors': context.CLIARGS.get('ignore_gpg_errors', None),
})
with ConcreteArtifactsManager.under_tmpdir(
C.DEFAULT_LOCAL_TMP,
**artifacts_manager_kwargs
) as concrete_artifact_cm:
kwargs['artifacts_manager'] = concrete_artifact_cm
return wrapped_method(*args, **kwargs)
return method_wrapper
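# Minimal usage sketch (hypothetical function name; the real call sites are
# the execute_* methods defined below):
#
#   @with_collection_artifacts_manager
#   def sync_collections(requirements, artifacts_manager=None):
#       ...  # artifacts_manager is injected as a keyword unless the caller passed one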
def _display_header(path, h1, h2, w1=10, w2=7):
display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
path,
h1,
h2,
'-' * max([len(h1), w1]), # Make sure that the number of dashes is at least the width of the header
'-' * max([len(h2), w2]),
cwidth=w1,
vwidth=w2,
))
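# Illustrative output for _display_header('/tmp/collections', 'Collection', 'Version'):
#
#   # /tmp/collections
#   Collection Version
#   ---------- -------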
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
fqcn=to_text(collection.fqcn),
version=collection.ver,
cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
vwidth=max(vwidth, min_vwidth)
))
def _get_collection_widths(collections):
if not is_iterable(collections):
collections = (collections, )
fqcn_set = {to_text(c.fqcn) for c in collections}
version_set = {to_text(c.ver) for c in collections}
fqcn_length = len(max(fqcn_set or [''], key=len))
version_length = len(max(version_set or [''], key=len))
return fqcn_length, version_length
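# For example, collections with FQCNs 'ns.web' and 'community.general' and
# versions '1.0.0' and '10.3.1' yield widths (17, 6): the length of the
# longest FQCN and of the longest version string.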
def validate_signature_count(value):
match = re.match(SIGNATURE_COUNT_RE, value)
if match is None:
raise ValueError(f"{value} is not a valid signature count value")
return value
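# Illustrative accepted values, assuming SIGNATURE_COUNT_RE matches an
# optional leading '+' followed by an integer or 'all': '1', '2', 'all',
# '+1', '+all'; something like '-1' or '1.5' raises ValueError.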
@dataclass
class RoleDistributionServer:
_api: t.Union[GalaxyAPI, None]
api_servers: list[GalaxyAPI]
@property
def api(self):
if self._api:
return self._api
for server in self.api_servers:
try:
if u'v1' in server.available_api_versions:
self._api = server
break
except Exception:
continue
if not self._api:
self._api = self.api_servers[0]
return self._api
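# Illustrative resolution: given api_servers [s1 (v3 only), s2 (v1 and v3)],
# the property lazily settles on s2, the first server advertising the v1
# role API, and falls back to the first server if none advertise v1.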
class GalaxyCLI(CLI):
"""Command to manage Ansible roles and collections.
None of the CLI tools are designed to run concurrently with themselves.
Use an external scheduler and/or locking to ensure there are no clashing operations.
"""
name = 'ansible-galaxy'
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
self._raw_args = args
self._implicit_role = False
if len(args) > 1:
# Inject role into sys.argv[1] as a backwards compatibility step
if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
args.insert(1, 'role')
self._implicit_role = True
# since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
if args[1:3] == ['role', 'login']:
display.error(
"The login command was removed in late 2020. An API key is now required to publish roles or collections "
"to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
"ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
"command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
sys.exit(1)
self.api_servers = []
self.galaxy = None
self.lazy_role_api = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
""" create an options parser for bin/ansible """
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--api-version', type=int, choices=[2, 3], help=argparse.SUPPRESS) # Hidden argument that should only be used in our tests
common.add_argument('--token', '--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', help='Ignore SSL certificate validation errors.', default=None)
# --timeout uses the default None to handle two different scenarios.
# * --timeout > C.GALAXY_SERVER_TIMEOUT for non-configured servers
# * --timeout > server-specific timeout > C.GALAXY_SERVER_TIMEOUT for configured servers.
common.add_argument('--timeout', dest='timeout', type=int,
help="The time to wait for operations against the galaxy server, defaults to 60s.")
opt_help.add_verbosity_options(common)
force = opt_help.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
collections_path = opt_help.ArgumentParser(add_help=False)
collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
action=opt_help.PrependListAction,
help="One or more directories to search for collections in addition "
"to the default COLLECTIONS_PATHS. Separate multiple paths "
"with '{0}'.".format(os.path.pathsep))
cache_options = opt_help.ArgumentParser(add_help=False)
cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
default=False, help='Clear the existing server response cache.')
cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
help='Do not use the server response cache.')
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection.set_defaults(func=self.execute_collection) # to satisfy doc build
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_download_options(collection_parser, parents=[common, cache_options])
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force, cache_options])
self.add_list_options(collection_parser, parents=[common, collections_path])
self.add_verify_options(collection_parser, parents=[common, collections_path])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role.set_defaults(func=self.execute_role) # to satisfy doc build
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_download_options(self, parser, parents=None):
download_parser = parser.add_parser('download', parents=parents,
help='Download collections and their dependencies as a tarball for an '
'offline install.')
download_parser.set_defaults(func=self.execute_download)
download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download collection(s) listed as dependencies.")
download_parser.add_argument('-p', '--download-path', dest='download_path',
default='./collections',
help='The directory to download the collections to.')
download_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be downloaded.')
download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
opt_help.add_runtask_options(init_parser)
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
galaxy_type = 'role'
if parser.metavar == 'COLLECTION_ACTION':
galaxy_type = 'collection'
list_parser = parser.add_parser('list', parents=parents,
help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
if galaxy_type == 'collection':
list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
help="Format to display the list of collections in.")
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
'keywords.')
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch '
'(usually master)')
import_parser.add_argument('--role-name', dest='role_name',
help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_'
'user/github_repo.')
def add_setup_options(self, parser, parents=None):
setup_parser = parser.add_parser('setup', parents=parents,
help='Manage the integration between Galaxy and the given source.')
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see '
'ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_verify_options(self, parser, parents=None):
galaxy_type = 'collection'
verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
'found on the server and the installed copy. This does not verify dependencies.')
verify_parser.set_defaults(func=self.execute_verify)
verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The installed collection(s) name. '
'This is mutually exclusive with --requirements-file.')
verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors during verification and continue with the next specified collection.')
verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
help='Validate collection integrity locally without contacting server for '
'canonical manifest hash.')
verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be verified.')
verify_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
verify_parser.add_argument('--signature', dest='signatures', action='append',
help='An additional signature source to verify the authenticity of the MANIFEST.json before using '
'it to verify the rest of the contents of a collection from a Galaxy server. Use in '
'conjunction with a positional collection name (mutually exclusive with --requirements-file).')
valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
'or "all" to signify that all signatures must be used to verify the collection. ' \
'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
ignore_gpg_status_help = 'A space separated list of status codes to ignore during signature verification (for example, NO_PUBKEY FAILURE). ' \
'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).' \
'Note: specify these after positional arguments or use -- to separate them.'
verify_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
verify_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
verify_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
def add_install_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
args_kwargs = {}
if galaxy_type == 'collection':
args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
'mutually exclusive with --requirements-file.'
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
args_kwargs['help'] = 'Role name, URL or tar file. This is mutually exclusive with -r.'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
if self._implicit_role:
# might install both roles and collections
description_text = (
'Install roles and collections from file(s), URL(s) or Ansible '
'Galaxy to the first entry in the config COLLECTIONS_PATH for collections '
'and first entry in the config ROLES_PATH for roles. '
'The first entry in the config ROLES_PATH can be overridden by --roles-path '
'or -p, but this will result in only roles being installed.'
)
prog = 'ansible-galaxy install'
else:
prog = f"ansible-galaxy {galaxy_type} install"
description_text = (
'Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy to the first entry in the config {1}S_PATH '
'unless overridden by --{0}s-path.'.format(galaxy_type, galaxy_type.upper())
)
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy'.format(galaxy_type),
description=description_text,
prog=prog,)
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help=ignore_errors_help)
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download {0}s listed as dependencies.".format(galaxy_type))
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing {0} and its "
"dependencies.".format(galaxy_type))
valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
'or "all" to signify that all signatures must be used to verify the collection. ' \
'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
ignore_gpg_status_help = 'A space separated list of status codes to ignore during signature verification (for example, NO_PUBKEY FAILURE). ' \
'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes). ' \
'Note: specify these after positional arguments or use -- to separate them.'
if galaxy_type == 'collection':
install_parser.add_argument('-p', '--collections-path', dest='collections_path',
default=self._get_default_collection_path(),
help='The path to the directory containing your collections.')
install_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
default=C.GALAXY_DISABLE_GPG_VERIFY,
help='Disable GPG signature verification when installing collections from a Galaxy server')
install_parser.add_argument('--signature', dest='signatures', action='append',
help='An additional signature source to verify the authenticity of the MANIFEST.json before '
'installing the collection from a Galaxy server. Use in conjunction with a positional '
'collection name (mutually exclusive with --requirements-file).')
install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
install_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
install_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
help='Install collection artifacts (tarballs) without contacting any distribution servers. '
'This does not apply to collections in remote Git repositories or URLs to remote tarballs.'
)
else:
if self._implicit_role:
install_parser.add_argument('-r', '--role-file', dest='requirements',
help='A file containing a list of collections and roles to be installed.')
else:
install_parser.add_argument('-r', '--role-file', dest='requirements',
help='A file containing a list of roles to be installed.')
r_re = re.compile(r'^(?<!-)-[a-zA-Z]*r[a-zA-Z]*') # -r, -fr
contains_r = bool([a for a in self._raw_args if r_re.match(a)])
role_file_re = re.compile(r'--role-file($|=)') # --role-file foo, --role-file=foo
contains_role_file = bool([a for a in self._raw_args if role_file_re.match(a)])
if self._implicit_role and (contains_r or contains_role_file):
# Any collections in the requirements files will also be installed
install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during collection signature verification')
install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
default=C.GALAXY_DISABLE_GPG_VERIFY,
help='Disable GPG signature verification when installing collections from a Galaxy server')
install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
install_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False,
help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
help='Path to the collection(s) directory to build. This should be the directory '
'that contains the galaxy.yml file. The default is the current working '
'directory.')
build_parser.add_argument('--output-path', dest='output_path', default='./',
help='The path in which the collection is built to. The default is the current '
'working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
# ensure we have 'usable' cli option
setattr(options, 'validate_certs', (None if options.ignore_certs is None else not options.ignore_certs))
# the default if validate_certs is None
setattr(options, 'resolved_validate_certs', (options.validate_certs if options.validate_certs is not None else not C.GALAXY_IGNORE_CERTS))
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
# dynamically add per server config depending on declared servers
C.config.load_galaxy_server_defs(C.GALAXY_SERVER_LIST)
galaxy_options = {}
for optional_key in ['clear_response_cache', 'no_cache']:
if optional_key in context.CLIARGS:
galaxy_options[optional_key] = context.CLIARGS[optional_key]
config_servers = []
# Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
for server_priority, server_key in enumerate(server_list, start=1):
# resolve the config created options above with existing config and user options
server_options = C.config.get_plugin_options(plugin_type='galaxy_server', name=server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as kwarg to GalaxyApi, same for others we pop here
auth_url = server_options.pop('auth_url')
client_id = server_options.pop('client_id')
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
api_version = server_options.pop('api_version')
if server_options['validate_certs'] is None:
server_options['validate_certs'] = context.CLIARGS['resolved_validate_certs']
validate_certs = server_options['validate_certs']
# This allows a user to explicitly force use of an API version when
# multiple versions are supported. This was added for testing
# against pulp_ansible and I'm not sure it has a practical purpose
# outside of this use case. As such, this option is not documented
# as of now
if api_version:
display.warning(
f'The specified "api_version" configuration for the galaxy server "{server_key}" is '
'not a public configuration, and may be removed at any time without warning.'
)
server_options['available_api_versions'] = {'v%s' % api_version: '/v%s' % api_version}
# default case if no auth info is provided.
server_options['token'] = None
if username:
server_options['token'] = BasicAuthToken(username, server_options['password'])
else:
if token_val:
if auth_url:
server_options['token'] = KeycloakToken(access_token=token_val,
auth_url=auth_url,
validate_certs=validate_certs,
client_id=client_id)
else:
# The galaxy v1 / github / django / 'Token'
server_options['token'] = GalaxyToken(token=token_val)
server_options.update(galaxy_options)
config_servers.append(GalaxyAPI(
self.galaxy, server_key,
priority=server_priority,
**server_options
))
cmd_server = context.CLIARGS['api_server']
if context.CLIARGS['api_version']:
api_version = context.CLIARGS['api_version']
display.warning(
'The --api-version is not a public argument, and may be removed at any time without warning.'
)
galaxy_options['available_api_versions'] = {'v%s' % api_version: '/v%s' % api_version}
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
validate_certs = context.CLIARGS['resolved_validate_certs']
default_server_timeout = context.CLIARGS['timeout'] if context.CLIARGS['timeout'] is not None else C.GALAXY_SERVER_TIMEOUT
if cmd_server:
            # Cmd args take precedence over the config entry, but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
config_server = next((s for s in config_servers if s.name == cmd_server), None)
if config_server:
self.api_servers.append(config_server)
else:
self.api_servers.append(GalaxyAPI(
self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
priority=len(config_servers) + 1,
validate_certs=validate_certs,
timeout=default_server_timeout,
**galaxy_options
))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
self.api_servers.append(GalaxyAPI(
self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
priority=0,
validate_certs=validate_certs,
timeout=default_server_timeout,
**galaxy_options
))
# checks api versions once a GalaxyRole makes an api call
# self.api can be used to evaluate the best server immediately
self.lazy_role_api = RoleDistributionServer(None, self.api_servers)
return context.CLIARGS['func']()
@property
def api(self):
return self.lazy_role_api.api
def _get_default_collection_path(self):
return C.COLLECTIONS_PATHS[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None, validate_signature_options=True):
"""
        Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are
        two requirements file formats:
# v1 (roles only)
- src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
include: Path to additional requirements.yml files.
# v2 (roles and collections)
---
roles:
# Same as v1 format just under the roles key
collections:
- namespace.collection
- name: namespace.collection
version: version identifier, multiple identifiers are separated by ','
source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
type: git|file|url|galaxy
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
:param artifacts_manager: Artifacts manager.
        :return: a dict containing the roles and collections found in the requirements file.
"""
requirements = {
'roles': [],
'collections': [],
}
b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
if not os.path.exists(b_requirements_file):
raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
display.vvv("Reading requirement file at '%s'" % requirements_file)
with open(b_requirements_file, 'rb') as req_obj:
try:
file_requirements = yaml_load(req_obj)
except YAMLError as err:
raise AnsibleError(
"Failed to parse the requirements yml at '%s' with the following error:\n%s"
% (to_native(requirements_file), to_native(err)))
if file_requirements is None:
raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
def parse_role_req(requirement):
if "include" not in requirement:
role = RoleRequirement.role_yaml_parse(requirement)
display.vvv("found role %s in yaml file" % to_text(role))
if "name" not in role and "src" not in role:
raise AnsibleError("Must specify name or src for role")
return [GalaxyRole(self.galaxy, self.lazy_role_api, **role)]
else:
b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
if not os.path.isfile(b_include_path):
raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
% (to_native(b_include_path), to_native(requirements_file)))
with open(b_include_path, 'rb') as f_include:
try:
return [GalaxyRole(self.galaxy, self.lazy_role_api, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
except Exception as e:
raise AnsibleError("Unable to load data from include requirements file: %s %s"
% (to_native(requirements_file), to_native(e)))
if isinstance(file_requirements, list):
# Older format that contains only roles
if not allow_old_format:
raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
"a list of collections to install")
for role_req in file_requirements:
requirements['roles'] += parse_role_req(role_req)
elif isinstance(file_requirements, dict):
# Newer format with a collections and/or roles key
extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
if extra_keys:
raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
"file. Found: %s" % (to_native(", ".join(extra_keys))))
for role_req in file_requirements.get('roles') or []:
requirements['roles'] += parse_role_req(role_req)
requirements['collections'] = [
Requirement.from_requirement_dict(
self._init_coll_req_dict(collection_req),
artifacts_manager,
validate_signature_options,
)
for collection_req in file_requirements.get('collections') or []
]
else:
raise AnsibleError(f"Expecting requirements yaml to be a list or dictionary but got {type(file_requirements).__name__}")
return requirements
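    # Example v2 requirements file accepted above (editor's sketch; the role
    # and collection names are purely illustrative):
    #
    #     roles:
    #       - name: namespace.some_role
    #         version: 1.9.6
    #     collections:
    #       - community.general
    #       - name: ansible.posix
    #         version: '>=1.0.0,<2.0.0'
    #         source: https://galaxy.ansible.com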
def _init_coll_req_dict(self, coll_req):
if not isinstance(coll_req, dict):
# Assume it's a string:
return {'name': coll_req}
if (
'name' not in coll_req or
not coll_req.get('source') or
coll_req.get('type', 'galaxy') != 'galaxy'
):
return coll_req
# Try and match up the requirement source with our list of Galaxy API
# servers defined in the config, otherwise create a server with that
# URL without any auth.
coll_req['source'] = next(
iter(
srvr for srvr in self.api_servers
if coll_req['source'] in {srvr.name, srvr.api_server}
),
GalaxyAPI(
self.galaxy,
'explicit_requirement_{name!s}'.format(
name=coll_req['name'],
),
coll_req['source'],
validate_certs=context.CLIARGS['resolved_validate_certs'],
),
)
return coll_req
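    # A minimal sketch of the normalization performed above (editor's note):
    #
    #     cli._init_coll_req_dict('community.general')
    #     # -> {'name': 'community.general'}
    #
    # A dict whose 'source' names a configured server is rewired to that
    # GalaxyAPI object; an unrecognized URL gets a one-off unauthenticated one.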
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
        # Get the top-level 'description' first, falling back to role_info['galaxy_info']['description'].
galaxy_info = role_info.get('galaxy_info', {})
description = role_info.get('description', galaxy_info.get('description', ''))
text.append(u"\tdescription: %s" % description)
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
# make sure we have a trailing newline returned
text.append(u"")
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
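    # For example (editor's sketch, assuming HOME=/home/alice):
    #
    #     GalaxyCLI._resolve_path('~/roles/../collections')
    #     # -> '/home/alice/collections'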
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
loader = DataLoader()
templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
templar.environment.filters['comment_ify'] = comment_ify
meta_value = templar.template(meta_template)
return meta_value
def _require_one_of_collections_requirements(
self, collections, requirements_file,
signatures=None,
artifacts_manager=None,
):
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
elif requirements_file:
if signatures is not None:
raise AnsibleError(
"The --signatures option and --requirements-file are mutually exclusive. "
"Use the --signatures with positional collection_name args or provide a "
"'signatures' key for requirements in the --requirements-file."
)
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(
requirements_file,
allow_old_format=False,
artifacts_manager=artifacts_manager,
)
else:
requirements = {
'collections': [
Requirement.from_string(coll_input, artifacts_manager, signatures)
for coll_input in collections
],
'roles': [],
}
return requirements
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(
to_text(collection_path, errors='surrogate_or_strict'),
to_text(output_path, errors='surrogate_or_strict'),
force,
)
@with_collection_artifacts_manager
def execute_download(self, artifacts_manager=None):
"""Download collections and their dependencies as a tarball for an offline install."""
collections = context.CLIARGS['args']
no_deps = context.CLIARGS['no_deps']
download_path = context.CLIARGS['download_path']
requirements_file = context.CLIARGS['requirements']
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
artifacts_manager=artifacts_manager,
)['collections']
download_path = GalaxyCLI._resolve_path(download_path)
b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
if not os.path.exists(b_download_path):
os.makedirs(b_download_path)
download_collections(
requirements, download_path, self.api_servers, no_deps,
context.CLIARGS['allow_pre_release'],
artifacts_manager=artifacts_manager,
)
return 0
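    # Typical invocation handled by execute_download (editor's sketch):
    #
    #     ansible-galaxy collection download -r requirements.yml -p ./downloads
    #
    # Every requirement (and, unless --no-deps, its dependencies) is saved as
    # a tarball under ./downloads for a later offline install.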
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
                min_ansible_version='.'.join(ansible_version.split('.')[:2]),  # x.y; avoids truncating two-digit minor versions
dependencies=[],
))
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <example@domain.com>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
build_ignore=[],
))
skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
# delete the contents rather than the collection root in case init was run from the root (--init-path ../../)
for root, dirs, files in os.walk(b_obj_path, topdown=True):
for old_dir in dirs:
path = os.path.join(root, old_dir)
shutil.rmtree(path)
for old_file in files:
path = os.path.join(root, old_file)
os.unlink(path)
if obj_skeleton is not None:
own_skeleton = False
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
loader = DataLoader()
inject_data.update(load_extra_vars(loader))
templar = Templar(loader, variables=inject_data)
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
# Filter out ignored directory names
# Use [:] to mutate the list os.walk uses
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
try:
b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
except AnsibleError as e:
shutil.rmtree(b_obj_path)
raise AnsibleError(f"Failed to create {galaxy_type.title()} {obj_name}. Templating {src_template} failed with the error: {e}") from e
with open(dest_file, 'wb') as df:
df.write(b_rendered)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path), follow_symlinks=False)
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if os.path.exists(b_dir_path):
continue
b_src_dir = to_bytes(os.path.join(root, d), errors='surrogate_or_strict')
if os.path.islink(b_src_dir):
shutil.copyfile(b_src_dir, b_dir_path, follow_symlinks=False)
else:
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, self.lazy_role_api, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
if not context.CLIARGS['offline']:
remote_data = None
try:
remote_data = self.api.lookup_role_by_name(role, False)
except GalaxyError as e:
if e.http_code == 400 and 'Bad Request' in e.message:
# Role does not exist in Ansible Galaxy
data = u"- the role %s was not found" % role
break
raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
if remote_data:
role_info.update(remote_data)
else:
data = u"- the role %s was not found" % role
break
elif context.CLIARGS['offline'] and not gr._exists:
data = u"- the role %s was not found" % role
break
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data += self._display_role_info(role_info)
self.pager(data)
@with_collection_artifacts_manager
def execute_verify(self, artifacts_manager=None):
"""Compare checksums with the collection(s) found on the server and the installed copy. This does not verify dependencies."""
collections = context.CLIARGS['args']
search_paths = AnsibleCollectionConfig.collection_paths
ignore_errors = context.CLIARGS['ignore_errors']
local_verify_only = context.CLIARGS['offline']
requirements_file = context.CLIARGS['requirements']
signatures = context.CLIARGS['signatures']
if signatures is not None:
signatures = list(signatures)
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
signatures=signatures,
artifacts_manager=artifacts_manager,
)['collections']
resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
results = verify_collections(
requirements, resolved_paths,
self.api_servers, ignore_errors,
local_verify_only=local_verify_only,
artifacts_manager=artifacts_manager,
)
if any(result for result in results if not result.success):
return 1
return 0
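    # Example invocations for execute_verify (editor's sketch):
    #
    #     ansible-galaxy collection verify community.general
    #     ansible-galaxy collection verify -r requirements.yml --offline
    #
    # The --offline form only checks the installed copy against its stored
    # manifest checksums instead of contacting a server.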
@with_collection_artifacts_manager
def execute_install(self, artifacts_manager=None):
"""
        Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
"""
install_items = context.CLIARGS['args']
requirements_file = context.CLIARGS['requirements']
collection_path = None
signatures = context.CLIARGS.get('signatures')
if signatures is not None:
signatures = list(signatures)
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
"run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
"'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
# TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
collection_requirements = []
role_requirements = []
if context.CLIARGS['type'] == 'collection':
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
requirements = self._require_one_of_collections_requirements(
install_items, requirements_file,
signatures=signatures,
artifacts_manager=artifacts_manager,
)
collection_requirements = requirements['collections']
if requirements['roles']:
display.vvv(two_type_warning.format('role'))
else:
if not install_items and requirements_file is None:
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
if requirements_file:
if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
galaxy_args = self._raw_args
will_install_collections = self._implicit_role and '-p' not in galaxy_args and '--roles-path' not in galaxy_args
requirements = self._parse_requirements_file(
requirements_file,
artifacts_manager=artifacts_manager,
validate_signature_options=will_install_collections,
)
role_requirements = requirements['roles']
# We can only install collections and roles at the same time if the type wasn't specified and the -p
# argument was not used. If collections are present in the requirements then at least display a msg.
if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
'--roles-path' in galaxy_args):
# We only want to display a warning if 'ansible-galaxy install -r ... -p ...'. Other cases the user
# was explicit about the type and shouldn't care that collections were skipped.
display_func = display.warning if self._implicit_role else display.vvv
display_func(two_type_warning.format('collection'))
else:
collection_path = self._get_default_collection_path()
collection_requirements = requirements['collections']
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
role_requirements.append(GalaxyRole(self.galaxy, self.lazy_role_api, **role))
if not role_requirements and not collection_requirements:
display.display("Skipping install, no requirements found")
return
if role_requirements:
display.display("Starting galaxy role install process")
self._execute_install_role(role_requirements)
if collection_requirements:
display.display("Starting galaxy collection install process")
# Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
# the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
self._execute_install_collection(
collection_requirements, collection_path,
artifacts_manager=artifacts_manager,
)
def _execute_install_collection(
self, requirements, path, artifacts_manager,
):
force = context.CLIARGS['force']
ignore_errors = context.CLIARGS['ignore_errors']
no_deps = context.CLIARGS['no_deps']
force_with_deps = context.CLIARGS['force_with_deps']
try:
disable_gpg_verify = context.CLIARGS['disable_gpg_verify']
except KeyError:
if self._implicit_role:
raise AnsibleError(
'Unable to properly parse command line arguments. Please use "ansible-galaxy collection install" '
'instead of "ansible-galaxy install".'
)
raise
# If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
upgrade = context.CLIARGS.get('upgrade', False)
collections_path = C.COLLECTIONS_PATHS
managed_paths = set(validate_collection_path(p) for p in C.COLLECTIONS_PATHS)
read_req_paths = set(validate_collection_path(p) for p in AnsibleCollectionConfig.collection_paths)
unexpected_path = C.GALAXY_COLLECTIONS_PATH_WARNING and not any(p.startswith(path) for p in managed_paths)
if unexpected_path and any(p.startswith(path) for p in read_req_paths):
display.warning(
f"The specified collections path '{path}' appears to be part of the pip Ansible package. "
"Managing these directly with ansible-galaxy could break the Ansible package. "
"Install collections to a configured collections path, which will take precedence over "
"collections found in the PYTHONPATH."
)
elif unexpected_path:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection will not be picked up in an Ansible "
"run, unless within a playbook-adjacent collections directory." % (to_text(path), to_text(":".join(collections_path))))
output_path = validate_collection_path(path)
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(
requirements, output_path, self.api_servers, ignore_errors,
no_deps, force, force_with_deps, upgrade,
allow_pre_release=allow_pre_release,
artifacts_manager=artifacts_manager,
disable_gpg_verify=disable_gpg_verify,
offline=context.CLIARGS.get('offline', False),
read_requirement_paths=read_req_paths,
)
return 0
def _execute_install_role(self, requirements):
role_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
for role in requirements:
# only process roles in roles files when names matches if given
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
# NOTE: the meta file is also required for installing the role, not just dependencies
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata_dependencies + role.requirements
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, self.lazy_role_api, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in requirements:
display.display('- adding dependency: %s' % to_text(dep_role))
requirements.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
requirements.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
requirements.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
List installed collections or roles
"""
if context.CLIARGS['type'] == 'role':
self.execute_list_role()
elif context.CLIARGS['type'] == 'collection':
self.execute_list_collection()
def execute_list_role(self):
"""
List all roles installed on the local system or a specific role
"""
path_found = False
role_found = False
warnings = []
roles_search_paths = context.CLIARGS['roles_path']
role_name = context.CLIARGS['role']
for path in roles_search_paths:
role_path = GalaxyCLI._resolve_path(path)
if os.path.isdir(path):
path_found = True
else:
warnings.append("- the configured path {0} does not exist.".format(path))
continue
if role_name:
# show the requested role, if it exists
gr = GalaxyRole(self.galaxy, self.lazy_role_api, role_name, path=os.path.join(role_path, role_name))
if os.path.isdir(gr.path):
role_found = True
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
break
warnings.append("- the role %s was not found" % role_name)
else:
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
if not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.lazy_role_api, path_file, path=path)
if gr.metadata:
_display_role(gr)
# Do not warn if the role was found in any of the search paths
if role_found and role_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError(
"- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type'])
)
return 0
@with_collection_artifacts_manager
def execute_list_collection(self, artifacts_manager=None):
"""
List all collections installed on the local system
:param artifacts_manager: Artifacts manager.
"""
if artifacts_manager is not None:
artifacts_manager.require_build_metadata = False
output_format = context.CLIARGS['output_format']
collection_name = context.CLIARGS['collection']
default_collections_path = set(C.COLLECTIONS_PATHS)
collections_search_paths = (
set(context.CLIARGS['collections_path'] or []) | default_collections_path | set(AnsibleCollectionConfig.collection_paths)
)
collections_in_paths = {}
warnings = []
path_found = False
collection_found = False
namespace_filter = None
collection_filter = None
if collection_name:
# list a specific collection
validate_collection_name(collection_name)
namespace_filter, collection_filter = collection_name.split('.')
collections = list(find_existing_collections(
list(collections_search_paths),
artifacts_manager,
namespace_filter=namespace_filter,
collection_filter=collection_filter,
dedupe=False
))
seen = set()
fqcn_width, version_width = _get_collection_widths(collections)
for collection in sorted(collections, key=lambda c: c.src):
collection_found = True
collection_path = pathlib.Path(to_text(collection.src)).parent.parent.as_posix()
if output_format in {'yaml', 'json'}:
collections_in_paths.setdefault(collection_path, {})
collections_in_paths[collection_path][collection.fqcn] = {'version': collection.ver}
else:
if collection_path not in seen:
_display_header(
collection_path,
'Collection',
'Version',
fqcn_width,
version_width
)
seen.add(collection_path)
_display_collection(collection, fqcn_width, version_width)
path_found = False
for path in collections_search_paths:
if not os.path.exists(path):
if path in default_collections_path:
# don't warn for missing default paths
continue
warnings.append("- the configured path {0} does not exist.".format(path))
elif os.path.exists(path) and not os.path.isdir(path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(path))
else:
path_found = True
# Do not warn if the specific collection was found in any of the search paths
if collection_found and collection_name:
warnings = []
for w in warnings:
display.warning(w)
if not collections and not path_found:
raise AnsibleOptionsError(
"- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type'])
)
if output_format == 'json':
display.display(json.dumps(collections_in_paths))
elif output_format == 'yaml':
display.display(yaml_dump(collections_in_paths))
return 0
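    # With --format json the method prints one mapping per collections path,
    # e.g. (editor's sketch, values illustrative):
    #
    #     {"/home/alice/.ansible/collections/ansible_collections":
    #         {"community.general": {"version": "9.0.0"}}}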
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
""" searches for roles on the Ansible Galaxy server"""
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.warning("No roles match your search.")
return 0
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return 0
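    # Example invocation for execute_search (editor's sketch; the search term
    # and author are illustrative):
    #
    #     ansible-galaxy role search elasticsearch --author someuser
    #
    # At least one search term, --platforms, --galaxy-tags or --author is
    # required, mirroring the validation above.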
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
rc = 0
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return rc
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if (state := task[0]['state']) in ['SUCCESS', 'FAILED']:
rc = ['SUCCESS', 'FAILED'].index(state)
finished = True
else:
time.sleep(10)
return rc
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return 0
def main(args=None):
GalaxyCLI.cli_executor(args)
if __name__ == '__main__':
main()
| 95,000 | Python | .py | 1,563 | 45.692258 | 158 | 0.590011 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,320 | __init__.py | ansible_ansible/lib/ansible/cli/__init__.py |
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import locale
import os
import sys
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
if sys.version_info < (3, 11):
raise SystemExit(
'ERROR: Ansible requires Python 3.11 or newer on the controller. '
'Current version: %s' % ''.join(sys.version.splitlines())
)
def check_blocking_io():
"""Check stdin/stdout/stderr to make sure they are using blocking IO."""
handles = []
for handle in (sys.stdin, sys.stdout, sys.stderr):
# noinspection PyBroadException
try:
fd = handle.fileno()
except Exception:
continue # not a real file handle, such as during the import sanity test
if not os.get_blocking(fd):
handles.append(getattr(handle, 'name', None) or '#%s' % fd)
if handles:
raise SystemExit('ERROR: Ansible requires blocking IO on stdin/stdout/stderr. '
'Non-blocking file handles detected: %s' % ', '.join(_io for _io in handles))
check_blocking_io()
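# A handle left non-blocking by a parent process (e.g. a misbehaving wrapper)
# can be restored before invoking ansible; a minimal sketch:
#
#     import os, sys
#     os.set_blocking(sys.stdout.fileno(), True)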
def initialize_locale():
"""Set the locale to the users default setting and ensure
the locale and filesystem encoding are UTF-8.
"""
try:
locale.setlocale(locale.LC_ALL, '')
dummy, encoding = locale.getlocale()
except (locale.Error, ValueError) as e:
raise SystemExit(
'ERROR: Ansible could not initialize the preferred locale: %s' % e
)
if not encoding or encoding.lower() not in ('utf-8', 'utf8'):
raise SystemExit('ERROR: Ansible requires the locale encoding to be UTF-8; Detected %s.' % encoding)
fs_enc = sys.getfilesystemencoding()
if fs_enc.lower() != 'utf-8':
raise SystemExit('ERROR: Ansible requires the filesystem encoding to be UTF-8; Detected %s.' % fs_enc)
initialize_locale()
from importlib.metadata import version
from ansible.module_utils.compat.version import LooseVersion
# Used for determining if the system is running a new enough Jinja2 version
# and should only restrict on our documented minimum versions
jinja2_version = version('jinja2')
if jinja2_version < LooseVersion('3.0'):
raise SystemExit(
'ERROR: Ansible requires Jinja2 3.0 or newer on the controller. '
'Current version: %s' % jinja2_version
)
import errno
import getpass
import subprocess
import traceback
from abc import ABC, abstractmethod
from pathlib import Path
try:
from ansible import constants as C
from ansible.utils.display import Display
display = Display()
except Exception as e:
print('ERROR: %s' % e, file=sys.stderr)
sys.exit(5)
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.file import is_executable
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs, init_plugin_loader
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.path import unfrackpath
from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
try:
import argcomplete
HAS_ARGCOMPLETE = True
except ImportError:
HAS_ARGCOMPLETE = False
class CLI(ABC):
""" code behind bin/ansible* programs """
PAGER = C.config.get_config_value('PAGER')
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
if C.DEVEL_WARNING and __version__.endswith('dev0'):
display.warning(
'You are running the development version of Ansible. You should only run Ansible from "devel" if '
'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
'changing source of code and can become unstable at any point.'
)
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
# Initialize plugin loader after parse, so that the init code can utilize parsed arguments
cli_collections_path = context.CLIARGS.get('collections_path') or []
if not is_sequence(cli_collections_path):
# In some contexts ``collections_path`` is singular
cli_collections_path = [cli_collections_path]
init_plugin_loader(cli_collections_path)
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
C.handle_config_noise(display)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
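    # For example (editor's sketch):
    #
    #     CLI.split_vault_id('dev@~/.vault_pass')  # -> ('dev', '~/.vault_pass')
    #     CLI.split_vault_id('prompt')             # -> (None, 'prompt')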
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
        # if an action needs an encrypt password (create_new_password=True) and we don't
        # have other secrets set up, then automatically add a password prompt as well.
        # prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
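    # For example (editor's sketch, assuming C.DEFAULT_VAULT_IDENTITY == 'default'):
    #
    #     CLI.build_vault_ids([], ['secret.txt'], ask_vault_pass=True)
    #     # -> ['default@secret.txt', 'default@prompt_ask_vault_pass']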
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
auto_prompt=auto_prompt)
last_exception = found_vault_secret = None
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
                # an empty or invalid password from the prompt warns and then
                # re-raises, rather than continuing to the next secret
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
found_vault_secret = True
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
try:
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
except AnsibleError as exc:
display.warning('Error getting vault password file (%s): %s' % (vault_id_name, to_text(exc)))
last_exception = exc
continue
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
last_exception = exc
continue
found_vault_secret = True
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
# An invalid or missing password file will error globally
# if no valid vault secret was found.
if last_exception and not found_vault_secret:
raise last_exception
return vault_secrets
@staticmethod
def _get_secret(prompt):
secret = getpass.getpass(prompt=prompt)
if secret:
secret = to_unsafe_text(secret)
return secret
@staticmethod
def ask_passwords():
""" prompt for connection and become passwords if needed """
op = context.CLIARGS
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
try:
become_prompt = "%s password: " % become_prompt_method
if op['ask_pass']:
sshpass = CLI._get_secret("SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
elif op['connection_password_file']:
sshpass = CLI.get_password_from_file(op['connection_password_file'])
if op['become_ask_pass']:
becomepass = CLI._get_secret(become_prompt)
if op['ask_pass'] and becomepass == '':
becomepass = sshpass
elif op['become_password_file']:
becomepass = CLI.get_password_from_file(op['become_password_file'])
except EOFError:
pass
return (sshpass, becomepass)
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
""" check for conflicting options """
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
return op
@abstractmethod
def init_parser(self, usage="", desc=None, epilog=None):
"""
Create an options parser for most ansible scripts
Subclasses need to implement this method. They will usually call the base class's
init_parser to create a basic version and then add their own options on top of that.
An implementation will look something like this::
def init_parser(self):
super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
ansible.arguments.option_helpers.add_runas_options(self.parser)
self.parser.add_option('--my-option', dest='my_option', action='store')
"""
self.parser = opt_help.create_base_parser(self.name, usage=usage, desc=desc, epilog=epilog)
@abstractmethod
def post_process_args(self, options):
"""Process the command line args
Subclasses need to implement this method. This method validates and transforms the command
line arguments. It can be used to check whether conflicting values were given, whether filenames
exist, etc.
An implementation will look something like this::
def post_process_args(self, options):
options = super(MyCLI, self).post_process_args(options)
if options.addition and options.subtraction:
raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
if isinstance(options.listofhosts, string_types):
options.listofhosts = string_types.split(',')
return options
"""
# process tags
if hasattr(options, 'tags') and not options.tags:
# optparse defaults does not do what's expected
# More specifically, we want `--tags` to be additive. So we cannot
# simply change C.TAGS_RUN's default to ["all"] because then passing
# --tags foo would cause us to have ['all', 'foo']
options.tags = ['all']
if hasattr(options, 'tags') and options.tags:
tags = set()
for tag_set in options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
options.tags = list(tags)
# process skip_tags
if hasattr(options, 'skip_tags') and options.skip_tags:
skip_tags = set()
for tag_set in options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
options.skip_tags = list(skip_tags)
# Make sure path argument doesn't have a backslash
if hasattr(options, 'action') and options.action in ['install', 'download'] and hasattr(options, 'args'):
options.args = [path.rstrip("/") for path in options.args]
# process inventory options except for CLIs that require their own processing
if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if options.inventory:
# should always be list
if isinstance(options.inventory, string_types):
options.inventory = [options.inventory]
# Ensure full paths when needed
options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
else:
options.inventory = C.DEFAULT_HOST_LIST
return options
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
context.CLIARGS.
Subclasses need to implement two helper methods, init_parser() and post_process_args() which
are called from this function before and after parsing the arguments.
"""
self.init_parser()
if HAS_ARGCOMPLETE:
argcomplete.autocomplete(self.parser)
try:
options = self.parser.parse_args(self.args[1:])
except SystemExit as ex:
if ex.code != 0:
self.parser.exit(status=2, message=" \n%s" % self.parser.format_help())
raise
options = self.post_process_args(options)
context._init_global_context(options)
@staticmethod
def version_info(gitinfo=False):
""" return full ansible version info """
if gitinfo:
            # expensive call, use with care
ansible_version_string = opt_help.version()
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def pager(text):
""" find reasonable way to display text """
# this is a much simpler form of what is in pydoc.py
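        # Decision order: no tty -> print directly; CLI.PAGER set -> pipe through it
        # (except on win32, which gets plain output); otherwise fall back to 'less'
        # when it is available on the system.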
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif CLI.PAGER:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
CLI.pager_pipe(text)
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
CLI.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
    @staticmethod
    def pager_pipe(text, pager=None):
        """ pipe text through a pager """
        # default to the class-level pager when no explicit command is given,
        # which also makes the pager_pipe(text, 'less') fallback above work
        pager = pager or CLI.PAGER
        if 'less' in pager:
            os.environ['LESS'] = CLI.LESS_OPTS
        try:
            cmd = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
            cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@staticmethod
def _play_prereqs():
# TODO: evaluate moving all of the code that touches ``AnsibleCollectionConfig``
# into ``init_plugin_loader`` so that we can specifically remove
# ``AnsibleCollectionConfig.playbook_paths`` to make it immutable after instantiation
options = context.CLIARGS
# all needs loader
loader = DataLoader()
basedir = options.get('basedir', False)
if basedir:
loader.set_basedir(basedir)
add_all_plugin_dirs(basedir)
AnsibleCollectionConfig.playbook_paths = basedir
default_collection = _get_collection_name_from_path(basedir)
if default_collection:
display.warning(u'running with default collection {0}'.format(default_collection))
AnsibleCollectionConfig.default_collection = default_collection
vault_ids = list(options['vault_ids'])
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(options['vault_password_files']),
ask_vault_pass=options['ask_vault_pass'],
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options['inventory'], cache=(not options.get('flush_cache')))
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if not hosts and no_hosts is False:
raise AnsibleError("Specified inventory, host pattern and/or --limit leaves us with no hosts to target.")
return hosts
@staticmethod
def get_password_from_file(pwd_file):
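        # Resolve a password from one of three sources:
        #   '-'                -> read raw bytes from stdin
        #   an executable file -> run it and use its stdout (a script that prints
        #                         the secret and exits 0)
        #   a regular file     -> read and strip its contents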
b_pwd_file = to_bytes(pwd_file)
secret = None
if b_pwd_file == b'-':
# ensure its read as bytes
secret = sys.stdin.buffer.read()
elif not os.path.exists(b_pwd_file):
raise AnsibleError("The password file %s was not found" % pwd_file)
elif is_executable(b_pwd_file):
display.vvvv(u'The password file %s is a script.' % to_text(pwd_file))
cmd = [b_pwd_file]
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleError("Problem occurred when trying to run the password script %s (%s)."
" If this is not a script, remove the executable bit from the file." % (pwd_file, e))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("The password script %s returned an error (rc=%s): %s" % (pwd_file, p.returncode, stderr))
secret = stdout
else:
try:
with open(b_pwd_file, "rb") as f:
secret = f.read().strip()
except (OSError, IOError) as e:
raise AnsibleError("Could not read password file %s: %s" % (pwd_file, e))
secret = secret.strip(b'\r\n')
if not secret:
raise AnsibleError('Empty password was provided from file (%s)' % pwd_file)
return to_unsafe_text(secret)
@classmethod
def cli_executor(cls, args=None):
if args is None:
args = sys.argv
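        # Exit code summary for the handlers below: 0 success, 1 AnsibleError,
        # 4 parser error, 5 options error, 6 non-utf-8 argv, 99 user interrupt,
        # 250 unexpected exception (2 and 3 are reserved for TQM host failures).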
try:
display.debug("starting run")
ansible_dir = Path(C.ANSIBLE_HOME).expanduser()
try:
ansible_dir.mkdir(mode=0o700)
except OSError as exc:
if exc.errno != errno.EEXIST:
display.warning(
"Failed to create the directory '%s': %s" % (ansible_dir, to_text(exc, errors='surrogate_or_replace'))
)
else:
display.debug("Created the '%s' directory" % ansible_dir)
try:
args = [to_text(a, errors='surrogate_or_strict') for a in args]
except UnicodeError:
display.error('Command line args are not in utf-8, unable to continue. Ansible currently only understands utf-8')
display.display(u"The full traceback was:\n\n%s" % to_text(traceback.format_exc()))
exit_code = 6
else:
cli = cls(args)
exit_code = cli.run()
except AnsibleOptionsError as e:
cli.parser.print_help()
display.error(to_text(e), wrap_text=False)
exit_code = 5
except AnsibleParserError as e:
display.error(to_text(e), wrap_text=False)
exit_code = 4
# TQM takes care of these, but leaving comment to reserve the exit codes
# except AnsibleHostUnreachable as e:
# display.error(str(e))
# exit_code = 3
# except AnsibleHostFailed as e:
# display.error(str(e))
# exit_code = 2
except AnsibleError as e:
display.error(to_text(e), wrap_text=False)
exit_code = 1
except KeyboardInterrupt:
display.error("User interrupted execution")
exit_code = 99
except Exception as e:
if C.DEFAULT_DEBUG:
                # Show raw stacktraces in debug mode; this also allows pdb to
                # enter post-mortem mode.
raise
have_cli_options = bool(context.CLIARGS)
display.error("Unexpected Exception, this is probably a bug: %s" % to_text(e), wrap_text=False)
if not have_cli_options or have_cli_options and context.CLIARGS['verbosity'] > 2:
log_only = False
if hasattr(e, 'orig_exc'):
display.vvv('\nexception type: %s' % to_text(type(e.orig_exc)))
why = to_text(e.orig_exc)
if to_text(e) != why:
display.vvv('\noriginal msg: %s' % why)
else:
display.display("to see the full traceback, use -vvv")
log_only = True
display.display(u"the full traceback was:\n\n%s" % to_text(traceback.format_exc()), log_only=log_only)
exit_code = 250
sys.exit(exit_code)
| 28,096
|
Python
|
.py
| 558
| 38.65233
| 147
| 0.610894
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,321
|
doc.py
|
ansible_ansible/lib/ansible/cli/doc.py
|
#!/usr/bin/env python
# Copyright: (c) 2014, James Tanner <tanner.jc@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import importlib
import pkgutil
import os
import os.path
import re
import textwrap
import traceback
import ansible.plugins.loader as plugin_loader
from pathlib import Path
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.collections.list import list_collection_dirs
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError, AnsiblePluginNotFound
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.json import json_dump
from ansible.module_utils.common.yaml import yaml_dump
from ansible.module_utils.six import string_types
from ansible.parsing.plugin_docs import read_docstub
from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.list import list_plugins
from ansible.plugins.loader import action_loader, fragment_loader
from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_plugin_docs, get_docstring, get_versioned_doclink
display = Display()
TARGET_OPTIONS = C.DOCUMENTABLE_PLUGINS + ('role', 'keyword',)
PB_OBJECTS = ['Play', 'Role', 'Block', 'Task', 'Handler']
PB_LOADED = {}
SNIPPETS = ['inventory', 'lookup', 'module']
# hardcoded from ascii values
STYLE = {
'BLINK': '\033[5m',
'BOLD': '\033[1m',
'HIDE': '\033[8m',
    # 'NORMAL': '\x1b[0m',  # newer?
'NORMAL': '\033[0m',
'RESET': "\033[0;0m",
# 'REVERSE':"\033[;7m", # newer?
'REVERSE': "\033[7m",
'UNDERLINE': '\033[4m',
}
# previously existing string identifiers
NOCOLOR = {
'BOLD': r'*%s*',
'UNDERLINE': r'`%s`',
'MODULE': r'[%s]',
'PLUGIN': r'[%s]',
}
ref_style = {
'MODULE': C.COLOR_DOC_MODULE,
'REF': C.COLOR_DOC_REFERENCE,
'LINK': C.COLOR_DOC_LINK,
'DEP': C.COLOR_DOC_DEPRECATED,
'CONSTANT': C.COLOR_DOC_CONSTANT,
'PLUGIN': C.COLOR_DOC_PLUGIN,
}
def jdump(text):
try:
display.display(json_dump(text))
except TypeError as e:
display.vvv(traceback.format_exc())
raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue: %s' % to_native(e))
class RoleMixin(object):
"""A mixin containing all methods relevant to role argument specification functionality.
Note: The methods for actual display of role data are not present here.
"""
# Potential locations of the role arg spec file in the meta subdir, with main.yml
# having the lowest priority.
ROLE_METADATA_FILES = ["main" + e for e in C.YAML_FILENAME_EXTENSIONS]
ROLE_ARGSPEC_FILES = ['argument_specs' + e for e in C.YAML_FILENAME_EXTENSIONS] + ROLE_METADATA_FILES
def _load_role_data(self, root, files, role_name, collection):
""" Load and process the YAML for the first found of a set of role files
:param str root: The root path to get the files from
        :param list files: List of candidate file names in order of precedence
        :param str role_name: The name of the role for which we want the argspec data.
        :param str collection: collection name or None in the case of standalone roles
:returns: A dict that contains the data requested, empty if no data found
"""
if collection:
meta_path = os.path.join(root, 'roles', role_name, 'meta')
else:
meta_path = os.path.join(root, 'meta')
        # Check all potential spec files; leave path as None when none exists
        path = None
        for specfile in files:
full_path = os.path.join(meta_path, specfile)
if os.path.exists(full_path):
path = full_path
break
if path is None:
return {}
try:
with open(path, 'r') as f:
data = from_yaml(f.read(), file_name=path)
if data is None:
data = {}
except (IOError, OSError) as e:
raise AnsibleParserError("Could not read the role '%s' (at %s)" % (role_name, path), orig_exc=e)
return data
def _load_metadata(self, role_name, role_path, collection):
"""Load the roles metadata from the source file.
:param str role_name: The name of the role for which we want the argspec data.
:param str role_path: Path to the role/collection root.
        :param str collection: collection name or None in the case of standalone roles
:returns: A dict of all role meta data, except ``argument_specs`` or an empty dict
"""
data = self._load_role_data(role_path, self.ROLE_METADATA_FILES, role_name, collection)
        data.pop('argument_specs', None)  # may be absent when only galaxy metadata exists
return data
def _load_argspec(self, role_name, role_path, collection):
"""Load the role argument spec data from the source file.
:param str role_name: The name of the role for which we want the argspec data.
:param str role_path: Path to the role/collection root.
        :param str collection: collection name or None in the case of standalone roles
        We support two files containing the role arg spec data: either meta/main.yml
        or meta/argument_specs.yml. The argument_specs.yml file will take precedence
        over the meta/main.yml file, if it exists. Data is NOT combined between the
        two files.
:returns: A dict of all data underneath the ``argument_specs`` top-level YAML
key in the argspec data file. Empty dict is returned if there is no data.
"""
try:
data = self._load_role_data(role_path, self.ROLE_ARGSPEC_FILES, role_name, collection)
data = data.get('argument_specs', {})
except Exception as e:
# we keep error info, but let caller deal with it
data = {'error': 'Failed to process role (%s): %s' % (role_name, to_native(e)), 'exception': e}
return data
def _find_all_normal_roles(self, role_paths, name_filters=None):
"""Find all non-collection roles that have an argument spec file.
Note that argument specs do not actually need to exist within the spec file.
:param role_paths: A tuple of one or more role paths. When a role with the same name
is found in multiple paths, only the first-found role is returned.
:param name_filters: A tuple of one or more role names used to filter the results.
:returns: A set of tuples consisting of: role name, full role path
"""
found = set()
found_names = set()
for path in role_paths:
if not os.path.isdir(path):
continue
# Check each subdir for an argument spec file
for entry in os.listdir(path):
role_path = os.path.join(path, entry)
# Check all potential spec files
for specfile in self.ROLE_ARGSPEC_FILES:
full_path = os.path.join(role_path, 'meta', specfile)
if os.path.exists(full_path):
if name_filters is None or entry in name_filters:
# select first-found role
if entry not in found_names:
found_names.add(entry)
                                # None here stands for 'collection', which standalone roles don't have;
                                # using the same structure as collection roles simplifies downstream code
found.add((entry, None, role_path))
# only read first existing spec
break
return found
def _find_all_collection_roles(self, name_filters=None, collection_filter=None):
"""Find all collection roles with an argument spec file.
Note that argument specs do not actually need to exist within the spec file.
:param name_filters: A tuple of one or more role names used to filter the results. These
might be fully qualified with the collection name (e.g., community.general.roleA)
or not (e.g., roleA).
:param collection_filter: A list of strings containing the FQCN of a collection which will
be used to limit results. This filter will take precedence over the name_filters.
:returns: A set of tuples consisting of: role name, collection name, collection path
"""
found = set()
b_colldirs = list_collection_dirs(coll_filter=collection_filter)
for b_path in b_colldirs:
path = to_text(b_path, errors='surrogate_or_strict')
collname = _get_collection_name_from_path(b_path)
roles_dir = os.path.join(path, 'roles')
if os.path.exists(roles_dir):
for entry in os.listdir(roles_dir):
# Check all potential spec files
for specfile in self.ROLE_ARGSPEC_FILES:
full_path = os.path.join(roles_dir, entry, 'meta', specfile)
if os.path.exists(full_path):
if name_filters is None:
found.add((entry, collname, path))
else:
# Name filters might contain a collection FQCN or not.
for fqcn in name_filters:
if len(fqcn.split('.')) == 3:
(ns, col, role) = fqcn.split('.')
if '.'.join([ns, col]) == collname and entry == role:
found.add((entry, collname, path))
elif fqcn == entry:
found.add((entry, collname, path))
break
return found
def _build_summary(self, role, collection, meta, argspec):
"""Build a summary dict for a role.
Returns a simplified role arg spec containing only the role entry points and their
short descriptions, and the role collection name (if applicable).
:param role: The simple role name.
:param collection: The collection containing the role (None or empty string if N/A).
:param meta: dictionary with galaxy information (None or empty string if N/A).
:param argspec: The complete role argspec data dict.
:returns: A tuple with the FQCN role name and a summary dict.
"""
if meta and meta.get('galaxy_info'):
summary = meta['galaxy_info']
else:
summary = {'description': 'UNDOCUMENTED'}
summary['entry_points'] = {}
if collection:
fqcn = '.'.join([collection, role])
summary['collection'] = collection
else:
fqcn = role
for ep in argspec.keys():
entry_spec = argspec[ep] or {}
summary['entry_points'][ep] = entry_spec.get('short_description', '')
return (fqcn, summary)
def _build_doc(self, role, path, collection, argspec, entry_point):
if collection:
fqcn = '.'.join([collection, role])
else:
fqcn = role
doc = {}
doc['path'] = path
doc['collection'] = collection
if 'error' in argspec:
doc.update(argspec)
else:
doc['entry_points'] = {}
for ep in argspec.keys():
if entry_point is None or ep == entry_point:
entry_spec = argspec[ep] or {}
doc['entry_points'][ep] = entry_spec
# If we didn't add any entry points (b/c of filtering), ignore this entry.
if len(doc['entry_points'].keys()) == 0:
doc = None
return (fqcn, doc)
def _create_role_list(self, fail_on_errors=True):
"""Return a dict describing the listing of all roles with arg specs.
        :param fail_on_errors: When True, raise argument spec errors; otherwise warn and skip the role.
:returns: A dict indexed by role name, with 'collection' and 'entry_points' keys per role.
Example return:
results = {
'roleA': {
'collection': '',
'entry_points': {
'main': 'Short description for main'
}
},
'a.b.c.roleB': {
'collection': 'a.b.c',
'entry_points': {
'main': 'Short description for main',
'alternate': 'Short description for alternate entry point'
                }
            },
            'x.y.z.roleB': {
'collection': 'x.y.z',
'entry_points': {
'main': 'Short description for main',
}
},
}
"""
roles_path = self._get_roles_path()
collection_filter = self._get_collection_filter()
if not collection_filter:
roles = self._find_all_normal_roles(roles_path)
else:
roles = set()
collroles = self._find_all_collection_roles(collection_filter=collection_filter)
result = {}
for role, collection, role_path in (roles | collroles):
try:
meta = self._load_metadata(role, role_path, collection)
except Exception as e:
display.vvv('No metadata for role (%s) due to: %s' % (role, to_native(e)), True)
meta = {}
argspec = self._load_argspec(role, role_path, collection)
if 'error' in argspec:
if fail_on_errors:
raise argspec['exception']
else:
display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True)
continue
fqcn, summary = self._build_summary(role, collection, meta, argspec)
result[fqcn] = summary
return result
def _create_role_doc(self, role_names, entry_point=None, fail_on_errors=True):
"""
        :param role_names: A tuple of one or more role names.
        :param entry_point: A role entry point name for filtering.
        :param fail_on_errors: When set to False, include errors in the JSON output instead of raising errors
:returns: A dict indexed by role name, with 'collection', 'entry_points', and 'path' keys per role.
"""
roles_path = self._get_roles_path()
roles = self._find_all_normal_roles(roles_path, name_filters=role_names)
collroles = self._find_all_collection_roles(name_filters=role_names)
result = {}
for role, collection, role_path in (roles | collroles):
argspec = self._load_argspec(role, role_path, collection)
if 'error' in argspec:
if fail_on_errors:
raise argspec['exception']
else:
display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True)
continue
fqcn, doc = self._build_doc(role, role_path, collection, argspec, entry_point)
if doc:
result[fqcn] = doc
return result
def _doclink(url):
# assume that if it is relative, it is for docsite, ignore rest
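    # Example (hedged): a relative path such as 'playbook_guide/playbooks.html' is
    # expanded to a versioned docs.ansible.com URL, while 'https://...' and '../...'
    # values pass through unchanged.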
if not url.startswith(("http", "..")):
url = get_versioned_doclink(url)
return url
def _format(string, *args):
""" add ascii formatting or delimiters """
for style in args:
if style not in ref_style and style.upper() not in STYLE and style not in C.COLOR_CODES:
raise KeyError("Invalid format value supplied: %s" % style)
if C.ANSIBLE_NOCOLOR:
# ignore most styles, but some already had 'identifier strings'
if style in NOCOLOR:
string = NOCOLOR[style] % string
elif style in C.COLOR_CODES:
string = stringc(string, style)
elif style in ref_style:
# assumes refs are also always colors
string = stringc(string, ref_style[style])
else:
# start specific style and 'end' with normal
string = '%s%s%s' % (STYLE[style.upper()], string, STYLE['NORMAL'])
return string
class DocCLI(CLI, RoleMixin):
""" displays information on modules installed in Ansible libraries.
It displays a terse listing of plugins and their short descriptions,
provides a printout of their DOCUMENTATION strings,
and it can create a short "snippet" which can be pasted into a playbook. """
name = 'ansible-doc'
# default ignore list for detailed views
IGNORE = ('module', 'docuri', 'version_added', 'version_added_collection', 'short_description',
'now_date', 'plainexamples', 'returndocs', 'collection', 'plugin_name')
# Warning: If you add more elements here, you also need to add it to the docsite build (in the
# ansible-community/antsibull repo)
_ITALIC = re.compile(r"\bI\(([^)]+)\)")
_BOLD = re.compile(r"\bB\(([^)]+)\)")
_MODULE = re.compile(r"\bM\(([^)]+)\)")
_PLUGIN = re.compile(r"\bP\(([^#)]+)#([a-z]+)\)")
_LINK = re.compile(r"\bL\(([^)]+), *([^)]+)\)")
_URL = re.compile(r"\bU\(([^)]+)\)")
_REF = re.compile(r"\bR\(([^)]+), *([^)]+)\)")
_CONST = re.compile(r"\bC\(([^)]+)\)")
_SEM_PARAMETER_STRING = r"\(((?:[^\\)]+|\\.)+)\)"
_SEM_OPTION_NAME = re.compile(r"\bO" + _SEM_PARAMETER_STRING)
_SEM_OPTION_VALUE = re.compile(r"\bV" + _SEM_PARAMETER_STRING)
_SEM_ENV_VARIABLE = re.compile(r"\bE" + _SEM_PARAMETER_STRING)
_SEM_RET_VALUE = re.compile(r"\bRV" + _SEM_PARAMETER_STRING)
_RULER = re.compile(r"\bHORIZONTALLINE\b")
# helper for unescaping
_UNESCAPE = re.compile(r"\\(.)")
_FQCN_TYPE_PREFIX_RE = re.compile(r'^([^.]+\.[^.]+\.[^#]+)#([a-z]+):(.*)$')
_IGNORE_MARKER = 'ignore:'
# rst specific
_RST_NOTE = re.compile(r".. note::")
_RST_SEEALSO = re.compile(r".. seealso::")
_RST_ROLES = re.compile(r":\w+?:`")
_RST_DIRECTIVES = re.compile(r".. \w+?::")
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.plugin_list = set()
@staticmethod
    def _tty_ify_sem_simple(matcher):
text = DocCLI._UNESCAPE.sub(r'\1', matcher.group(1))
return f"`{text}'"
@staticmethod
def _tty_ify_sem_complex(matcher):
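        # Example (hedged): the captured body of O(ansible.builtin.file#module:state=present)
        # renders as "`state=present' (of module ansible.builtin.file)".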
text = DocCLI._UNESCAPE.sub(r'\1', matcher.group(1))
value = None
if '=' in text:
text, value = text.split('=', 1)
m = DocCLI._FQCN_TYPE_PREFIX_RE.match(text)
if m:
plugin_fqcn = m.group(1)
plugin_type = m.group(2)
text = m.group(3)
elif text.startswith(DocCLI._IGNORE_MARKER):
text = text[len(DocCLI._IGNORE_MARKER):]
plugin_fqcn = plugin_type = ''
else:
plugin_fqcn = plugin_type = ''
entrypoint = None
if ':' in text:
entrypoint, text = text.split(':', 1)
if value is not None:
text = f"{text}={value}"
if plugin_fqcn and plugin_type:
plugin_suffix = '' if plugin_type in ('role', 'module', 'playbook') else ' plugin'
plugin = f"{plugin_type}{plugin_suffix} {plugin_fqcn}"
if plugin_type == 'role' and entrypoint is not None:
plugin = f"{plugin}, {entrypoint} entrypoint"
return f"`{text}' (of {plugin})"
return f"`{text}'"
@classmethod
def tty_ify(cls, text):
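        # Example (hedged), with color disabled:
        #   'See M(ansible.builtin.copy) and C(dest=/tmp)'
        # becomes "See [ansible.builtin.copy] and `dest=/tmp'".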
# general formatting
t = cls._ITALIC.sub(_format(r"\1", 'UNDERLINE'), text) # no ascii code for this
t = cls._BOLD.sub(_format(r"\1", 'BOLD'), t)
t = cls._MODULE.sub(_format(r"\1", 'MODULE'), t) # M(word) => [word]
t = cls._PLUGIN.sub(_format("[" + r"\1" + "]", 'PLUGIN'), t) # P(word#type) => [word]
t = cls._REF.sub(_format(r"\1", 'REF'), t) # R(word, sphinx-ref) => word
t = cls._CONST.sub(_format(r"`\1'", 'CONSTANT'), t)
t = cls._SEM_OPTION_NAME.sub(cls._tty_ify_sem_complex, t) # O(expr)
        t = cls._SEM_OPTION_VALUE.sub(cls._tty_ify_sem_simple, t) # V(expr)
        t = cls._SEM_ENV_VARIABLE.sub(cls._tty_ify_sem_simple, t) # E(expr)
t = cls._SEM_RET_VALUE.sub(cls._tty_ify_sem_complex, t) # RV(expr)
t = cls._RULER.sub("\n{0}\n".format("-" * 13), t) # HORIZONTALLINE => -------
# remove rst
t = cls._RST_SEEALSO.sub(r"See also:", t) # seealso to See also:
t = cls._RST_NOTE.sub(_format(r"Note:", 'bold'), t) # .. note:: to note:
t = cls._RST_ROLES.sub(r"`", t) # remove :ref: and other tags, keep tilde to match ending one
t = cls._RST_DIRECTIVES.sub(r"", t) # remove .. stuff:: in general
# handle docsite refs
# U(word) => word
t = re.sub(cls._URL, lambda m: _format(r"%s" % _doclink(m.group(1)), 'LINK'), t)
# L(word, url) => word <url>
t = re.sub(cls._LINK, lambda m: r"%s <%s>" % (m.group(1), _format(_doclink(m.group(2)), 'LINK')), t)
return t
def init_parser(self):
coll_filter = 'A supplied argument will be used for filtering, can be a namespace or full collection name.'
super(DocCLI, self).init_parser(
desc="plugin documentation tool",
epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
# targets
self.parser.add_argument('args', nargs='*', help='Plugin', metavar='plugin')
self.parser.add_argument("-t", "--type", action="store", default='module', dest='type',
help='Choose which plugin type (defaults to "module"). '
'Available plugin types are : {0}'.format(TARGET_OPTIONS),
choices=TARGET_OPTIONS)
# formatting
self.parser.add_argument("-j", "--json", action="store_true", default=False, dest='json_format',
help='Change output into json format.')
# TODO: warn if not used with -t roles
# role-specific options
self.parser.add_argument("-r", "--roles-path", dest='roles_path', default=C.DEFAULT_ROLES_PATH,
type=opt_help.unfrack_path(pathsep=True),
action=opt_help.PrependListAction,
help='The path to the directory containing your roles.')
# exclusive modifiers
exclusive = self.parser.add_mutually_exclusive_group()
# TODO: warn if not used with -t roles
exclusive.add_argument("-e", "--entry-point", dest="entry_point",
help="Select the entry point for role(s).")
# TODO: warn with --json as it is incompatible
exclusive.add_argument("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for these plugin types: %s' % ', '.join(SNIPPETS))
# TODO: warn when arg/plugin is passed
exclusive.add_argument("-F", "--list_files", action="store_true", default=False, dest="list_files",
help='Show plugin names and their source files without summaries (implies --list). %s' % coll_filter)
exclusive.add_argument("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available plugins. %s' % coll_filter)
exclusive.add_argument("--metadata-dump", action="store_true", default=False, dest='dump',
help='**For internal use only** Dump json metadata for all entries, ignores other options.')
# generic again
self.parser.add_argument("--no-fail-on-errors", action="store_true", default=False, dest='no_fail_on_errors',
help='**For internal use only** Only used for --metadata-dump. '
'Do not fail on errors. Report the error message in the JSON instead.')
def post_process_args(self, options):
options = super(DocCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def display_plugin_list(self, results):
# format for user
displace = max(len(x) for x in results.keys())
linelimit = display.columns - displace - 5
text = []
deprecated = []
# format display per option
if context.CLIARGS['list_files']:
# list plugin file names
for plugin in sorted(results.keys()):
filename = to_native(results[plugin])
# handle deprecated for builtin/legacy
pbreak = plugin.split('.')
if pbreak[-1].startswith('_') and pbreak[0] == 'ansible' and pbreak[1] in ('builtin', 'legacy'):
pbreak[-1] = pbreak[-1][1:]
plugin = '.'.join(pbreak)
deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
else:
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
else:
# list plugin names and short desc
for plugin in sorted(results.keys()):
desc = DocCLI.tty_ify(results[plugin])
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
pbreak = plugin.split('.')
# TODO: add mark for deprecated collection plugins
if pbreak[-1].startswith('_') and plugin.startswith(('ansible.builtin.', 'ansible.legacy.')):
# Handle deprecated ansible.builtin plugins
pbreak[-1] = pbreak[-1][1:]
plugin = '.'.join(pbreak)
deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
# display results
DocCLI.pager("\n".join(text))
def _display_available_roles(self, list_json):
"""Display all roles we can find with a valid argument specification.
Output is: fqcn role name, entry point, short description
"""
roles = list(list_json.keys())
entry_point_names = set() # to find max len
for role in roles:
for entry_point in list_json[role]['entry_points'].keys():
entry_point_names.add(entry_point)
        max_role_len = 0
        max_ep_len = 0
        if roles:
            max_role_len = max(len(x) for x in roles)
        if entry_point_names:
            max_ep_len = max(len(x) for x in entry_point_names)
linelimit = display.columns - max_role_len - max_ep_len - 5
text = []
for role in sorted(roles):
if list_json[role]['entry_points']:
text.append('%s:' % role)
text.append(' specs:')
for entry_point, desc in list_json[role]['entry_points'].items():
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
text.append(" %-*s: %s" % (max_ep_len, entry_point, desc))
else:
text.append('%s' % role)
# display results
DocCLI.pager("\n".join(text))
def _display_role_doc(self, role_json):
roles = list(role_json.keys())
text = []
for role in roles:
try:
if 'error' in role_json[role]:
display.warning("Skipping role '%s' due to: %s" % (role, role_json[role]['error']), True)
continue
text += self.get_role_man_text(role, role_json[role])
except AnsibleParserError as e:
# TODO: warn and skip role?
raise AnsibleParserError("Role '%s" % (role), orig_exc=e)
# display results
DocCLI.pager("\n".join(text))
@staticmethod
def _list_keywords():
return from_yaml(pkgutil.get_data('ansible', 'keyword_desc.yml'))
@staticmethod
def _get_keywords_docs(keys):
data = {}
descs = DocCLI._list_keywords()
for key in keys:
if key.startswith('with_'):
                # simplify loops, we don't want to handle every with_<lookup> combo
keyword = 'loop'
elif key == 'async':
                # because 'async' became a reserved word in Python, we had to rename it internally
keyword = 'async_val'
else:
keyword = key
try:
                # if there is no description, the KeyError raised here ends this block
kdata = {'description': descs[key]}
# get playbook objects for keyword and use first to get keyword attributes
kdata['applies_to'] = []
for pobj in PB_OBJECTS:
if pobj not in PB_LOADED:
obj_class = 'ansible.playbook.%s' % pobj.lower()
loaded_class = importlib.import_module(obj_class)
PB_LOADED[pobj] = getattr(loaded_class, pobj, None)
if keyword in PB_LOADED[pobj].fattributes:
kdata['applies_to'].append(pobj)
# we should only need these once
if 'type' not in kdata:
fa = PB_LOADED[pobj].fattributes.get(keyword)
if getattr(fa, 'private'):
kdata = {}
raise KeyError
kdata['type'] = getattr(fa, 'isa', 'string')
if keyword.endswith('when') or keyword in ('until',):
                                # TODO: make this a field attribute property,
                                # it would also help with the warnings on {{}} stacking
kdata['template'] = 'implicit'
elif getattr(fa, 'static'):
kdata['template'] = 'static'
else:
kdata['template'] = 'explicit'
# those that require no processing
for visible in ('alias', 'priority'):
kdata[visible] = getattr(fa, visible)
# remove None keys
for k in list(kdata.keys()):
if kdata[k] is None:
del kdata[k]
data[key] = kdata
except (AttributeError, KeyError) as e:
display.warning("Skipping Invalid keyword '%s' specified: %s" % (key, to_text(e)))
if display.verbosity >= 3:
display.verbose(traceback.format_exc())
return data
def _get_collection_filter(self):
coll_filter = None
if len(context.CLIARGS['args']) >= 1:
coll_filter = context.CLIARGS['args']
for coll_name in coll_filter:
if not AnsibleCollectionRef.is_valid_collection_name(coll_name):
raise AnsibleError('Invalid collection name (must be of the form namespace.collection): {0}'.format(coll_name))
return coll_filter
def _list_plugins(self, plugin_type, content):
results = {}
self.plugins = {}
loader = DocCLI._prep_loader(plugin_type)
coll_filter = self._get_collection_filter()
self.plugins.update(list_plugins(plugin_type, coll_filter))
# get appropriate content depending on option
if content == 'dir':
results = self._get_plugin_list_descriptions(loader)
elif content == 'files':
results = {k: self.plugins[k][0] for k in self.plugins.keys()}
else:
results = {k: {} for k in self.plugins.keys()}
self.plugin_list = set() # reset for next iteration
return results
def _get_plugins_docs(self, plugin_type, names, fail_ok=False, fail_on_errors=True):
loader = DocCLI._prep_loader(plugin_type)
# get the docs for plugins in the command line list
plugin_docs = {}
for plugin in names:
doc = {}
try:
doc, plainexamples, returndocs, metadata = get_plugin_docs(plugin, plugin_type, loader, fragment_loader, (context.CLIARGS['verbosity'] > 0))
except AnsiblePluginNotFound as e:
display.warning(to_native(e))
continue
except Exception as e:
if not fail_on_errors:
plugin_docs[plugin] = {'error': 'Missing documentation or could not parse documentation: %s' % to_native(e)}
continue
display.vvv(traceback.format_exc())
msg = "%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, to_native(e))
if fail_ok:
display.warning(msg)
else:
raise AnsibleError(msg)
if not doc:
# The doc section existed but was empty
if not fail_on_errors:
plugin_docs[plugin] = {'error': 'No valid documentation found'}
continue
docs = DocCLI._combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata)
if not fail_on_errors:
# Check whether JSON serialization would break
try:
json_dump(docs)
except Exception as e: # pylint:disable=broad-except
plugin_docs[plugin] = {'error': 'Cannot serialize documentation as JSON: %s' % to_native(e)}
continue
plugin_docs[plugin] = docs
return plugin_docs
def _get_roles_path(self):
"""
Add any 'roles' subdir in playbook dir to the roles search path.
And as a last resort, add the playbook dir itself. Order being:
- 'roles' subdir of playbook dir
- DEFAULT_ROLES_PATH (default in cliargs)
- playbook dir (basedir)
NOTE: This matches logic in RoleDefinition._load_role_path() method.
"""
roles_path = context.CLIARGS['roles_path']
if context.CLIARGS['basedir'] is not None:
subdir = os.path.join(context.CLIARGS['basedir'], "roles")
if os.path.isdir(subdir):
roles_path = (subdir,) + roles_path
roles_path = roles_path + (context.CLIARGS['basedir'],)
return roles_path
@staticmethod
def _prep_loader(plugin_type):
""" return a plugint type specific loader """
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
# add to plugin paths from command line
if context.CLIARGS['basedir'] is not None:
loader.add_directory(context.CLIARGS['basedir'], with_subdir=True)
if context.CLIARGS['module_path']:
for path in context.CLIARGS['module_path']:
if path:
loader.add_directory(path)
# save only top level paths for errors
loader._paths = None # reset so we can use subdirs later
return loader
def run(self):
super(DocCLI, self).run()
basedir = context.CLIARGS['basedir']
plugin_type = context.CLIARGS['type'].lower()
do_json = context.CLIARGS['json_format'] or context.CLIARGS['dump']
listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir']
no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
if context.CLIARGS['list_files']:
content = 'files'
elif context.CLIARGS['list_dir']:
content = 'dir'
else:
content = None
docs = {}
if basedir:
AnsibleCollectionConfig.playbook_paths = basedir
if plugin_type not in TARGET_OPTIONS:
raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
if context.CLIARGS['dump']:
# we always dump all types, ignore restrictions
ptypes = TARGET_OPTIONS
docs['all'] = {}
for ptype in ptypes:
if ptype == 'role':
roles = self._create_role_list(fail_on_errors=no_fail)
docs['all'][ptype] = self._create_role_doc(roles.keys(), context.CLIARGS['entry_point'], fail_on_errors=no_fail)
elif ptype == 'keyword':
names = DocCLI._list_keywords()
docs['all'][ptype] = DocCLI._get_keywords_docs(names.keys())
else:
plugin_names = self._list_plugins(ptype, None)
docs['all'][ptype] = self._get_plugins_docs(ptype, plugin_names, fail_ok=(ptype in ('test', 'filter')), fail_on_errors=no_fail)
# reset list after each type to avoid pollution
elif listing:
if plugin_type == 'keyword':
docs = DocCLI._list_keywords()
elif plugin_type == 'role':
docs = self._create_role_list(fail_on_errors=False)
else:
docs = self._list_plugins(plugin_type, content)
else:
# here we require a name
if len(context.CLIARGS['args']) == 0:
raise AnsibleOptionsError("Missing name(s), incorrect options passed for detailed documentation.")
if plugin_type == 'keyword':
docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
elif plugin_type == 'role':
docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'], fail_on_errors=no_fail)
else:
# display specific plugin docs
docs = self._get_plugins_docs(plugin_type, context.CLIARGS['args'])
# Display the docs
if do_json:
jdump(docs)
else:
text = []
if plugin_type in C.DOCUMENTABLE_PLUGINS:
if listing and docs:
self.display_plugin_list(docs)
elif context.CLIARGS['show_snippet']:
if plugin_type not in SNIPPETS:
raise AnsibleError('Snippets are only available for the following plugin'
' types: %s' % ', '.join(SNIPPETS))
for plugin, doc_data in docs.items():
try:
textret = DocCLI.format_snippet(plugin, plugin_type, doc_data['doc'])
except ValueError as e:
display.warning("Unable to construct a snippet for"
" '{0}': {1}".format(plugin, to_text(e)))
else:
text.append(textret)
else:
# Some changes to how plain text docs are formatted
for plugin, doc_data in docs.items():
textret = DocCLI.format_plugin_doc(plugin, plugin_type,
doc_data['doc'], doc_data['examples'],
doc_data['return'], doc_data['metadata'])
if textret:
text.append(textret)
else:
display.warning("No valid documentation was retrieved from '%s'" % plugin)
elif plugin_type == 'role':
if context.CLIARGS['list_dir'] and docs:
self._display_available_roles(docs)
elif docs:
self._display_role_doc(docs)
elif docs:
text = DocCLI.tty_ify(DocCLI._dump_yaml(docs))
if text:
DocCLI.pager(''.join(text))
return 0
@staticmethod
def get_all_plugins_of_type(plugin_type):
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
paths = loader._get_paths_with_context()
plugins = {}
for path_context in paths:
plugins.update(list_plugins(plugin_type))
return sorted(plugins.keys())
@staticmethod
def get_plugin_metadata(plugin_type, plugin_name):
# if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
result = loader.find_plugin_with_context(plugin_name, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if not result.resolved:
raise AnsibleError("unable to load {0} plugin named {1} ".format(plugin_type, plugin_name))
filename = result.plugin_resolved_path
collection_name = result.plugin_resolved_collection
try:
doc, __, __, __ = get_docstring(filename, fragment_loader, verbose=(context.CLIARGS['verbosity'] > 0),
collection_name=collection_name, plugin_type=plugin_type)
except Exception:
display.vvv(traceback.format_exc())
raise AnsibleError("%s %s at %s has a documentation formatting error or is missing documentation." % (plugin_type, plugin_name, filename))
if doc is None:
# Removed plugins don't have any documentation
return None
return dict(
name=plugin_name,
namespace=DocCLI.namespace_from_plugin_filepath(filename, plugin_name, loader.package_path),
description=doc.get('short_description', "UNKNOWN"),
version_added=doc.get('version_added', "UNKNOWN")
)
@staticmethod
def namespace_from_plugin_filepath(filepath, plugin_name, basedir):
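        # Example (hedged): filepath '<basedir>/cloud/amazon/ec2.py' with plugin_name
        # 'ec2' yields 'cloud.amazon'; a plugin sitting directly in basedir yields None.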
if not basedir.endswith('/'):
basedir += '/'
rel_path = filepath.replace(basedir, '')
extension_free = os.path.splitext(rel_path)[0]
namespace_only = extension_free.rsplit(plugin_name, 1)[0].strip('/_')
clean_ns = namespace_only.replace('/', '.')
if clean_ns == '':
clean_ns = None
return clean_ns
@staticmethod
def _combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
# generate extra data
if plugin_type == 'module':
# is there corresponding action plugin?
if plugin in action_loader:
doc['has_action'] = True
else:
doc['has_action'] = False
# return everything as one dictionary
return {'doc': doc, 'examples': plainexamples, 'return': returndocs, 'metadata': metadata}
@staticmethod
def format_snippet(plugin, plugin_type, doc):
""" return heavily commented plugin use to insert into play """
if plugin_type == 'inventory' and doc.get('options', {}).get('plugin'):
# these do not take a yaml config that we can write a snippet for
raise ValueError('The {0} inventory plugin does not take YAML type config source'
' that can be used with the "auto" plugin so a snippet cannot be'
' created.'.format(plugin))
text = []
if plugin_type == 'lookup':
text = _do_lookup_snippet(doc)
elif 'options' in doc:
text = _do_yaml_snippet(doc)
text.append('')
return "\n".join(text)
@staticmethod
def format_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
collection_name = doc['collection']
# TODO: do we really want this?
# add_collection_to_versions_and_dates(doc, '(unknown)', is_module=(plugin_type == 'module'))
# remove_current_collection_from_versions_and_dates(doc, collection_name, is_module=(plugin_type == 'module'))
# remove_current_collection_from_versions_and_dates(
# returndocs, collection_name, is_module=(plugin_type == 'module'), return_docs=True)
# assign from other sections
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
doc['metadata'] = metadata
try:
text = DocCLI.get_man_text(doc, collection_name, plugin_type)
except Exception as e:
display.vvv(traceback.format_exc())
raise AnsibleError("Unable to retrieve documentation from '%s'" % (plugin), orig_exc=e)
return text
def _get_plugin_list_descriptions(self, loader):
descs = {}
for plugin in self.plugins.keys():
# TODO: move to plugin itself i.e: plugin.get_desc()
doc = None
filename = Path(to_native(self.plugins[plugin][0]))
docerror = None
try:
doc = read_docstub(filename)
except Exception as e:
docerror = e
# plugin file was empty or had error, lets try other options
if doc is None:
# handle test/filters that are in file with diff name
base = plugin.split('.')[-1]
basefile = filename.with_name(base + filename.suffix)
for extension in C.DOC_EXTENSIONS:
docfile = basefile.with_suffix(extension)
try:
if docfile.exists():
doc = read_docstub(docfile)
except Exception as e:
docerror = e
if docerror:
display.warning("%s has a documentation formatting error: %s" % (plugin, docerror))
continue
if not doc or not isinstance(doc, dict):
desc = 'UNDOCUMENTED'
else:
desc = doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip()
descs[plugin] = desc
return descs
@staticmethod
def print_paths(finder):
""" Returns a string suitable for printing of the search path """
# Uses a list to get the order right
ret = []
for i in finder._get_paths(subdirs=False):
i = to_text(i, errors='surrogate_or_strict')
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
@staticmethod
def _dump_yaml(struct, flow_style=False):
return yaml_dump(struct, default_flow_style=flow_style, default_style="''", Dumper=AnsibleDumper).rstrip('\n')
@staticmethod
def _indent_lines(text, indent):
return DocCLI.tty_ify('\n'.join([indent + line for line in text.split('\n')]))
@staticmethod
def _format_version_added(version_added, version_added_collection=None):
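        # Examples (from the logic below): ('2.4', 'ansible.builtin') -> 'version 2.4 of ansible-core',
        # ('historical', None) -> 'historical', ('1.2.0', None) -> 'version 1.2.0'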
if version_added_collection == 'ansible.builtin':
version_added_collection = 'ansible-core'
# In ansible-core, version_added can be 'historical'
if version_added == 'historical':
return 'historical'
if version_added_collection:
version_added = '%s of %s' % (version_added, version_added_collection)
return 'version %s' % (version_added, )
@staticmethod
def warp_fill(text, limit, initial_indent='', subsequent_indent='', **kwargs):
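        # Wrap each blank-line separated paragraph independently so manual paragraph
        # breaks survive the reflow; initial_indent only applies to the first
        # paragraph, later ones reuse subsequent_indent.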
result = []
for paragraph in text.split('\n\n'):
result.append(textwrap.fill(paragraph, limit, initial_indent=initial_indent, subsequent_indent=subsequent_indent,
break_on_hyphens=False, break_long_words=False, drop_whitespace=True, **kwargs))
initial_indent = subsequent_indent
return '\n'.join(result)
@staticmethod
def add_fields(text, fields, limit, opt_indent, return_values=False, base_indent='', man=False):
for o in sorted(fields):
# Create a copy so we don't modify the original (in case YAML anchors have been used)
opt = dict(fields[o])
# required is used as indicator and removed
required = opt.pop('required', False)
if not isinstance(required, bool):
raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
opt_leadin = ' '
key = ''
if required:
if C.ANSIBLE_NOCOLOR:
opt_leadin = "="
key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'bold', 'red'))
else:
if C.ANSIBLE_NOCOLOR:
opt_leadin = "-"
key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'yellow'))
# description is specifically formatted and can either be string or list of strings
if 'description' not in opt:
raise AnsibleError("All (sub-)options and return values must have a 'description' field")
text.append('')
# TODO: push this to top of for and sort by size, create indent on largest key?
inline_indent = base_indent + ' ' * max((len(opt_indent) - len(o)) - len(base_indent), 2)
sub_indent = inline_indent + ' ' * (len(o) + 3)
if is_sequence(opt['description']):
for entry_idx, entry in enumerate(opt['description'], 1):
if not isinstance(entry, string_types):
raise AnsibleError("Expected string in description of %s at index %s, got %s" % (o, entry_idx, type(entry)))
if entry_idx == 1:
text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(entry), limit, initial_indent=inline_indent, subsequent_indent=sub_indent))
else:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(entry), limit, initial_indent=sub_indent, subsequent_indent=sub_indent))
else:
if not isinstance(opt['description'], string_types):
raise AnsibleError("Expected string in description of %s, got %s" % (o, type(opt['description'])))
text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(opt['description']), limit, initial_indent=inline_indent, subsequent_indent=sub_indent))
del opt['description']
suboptions = []
for subkey in ('options', 'suboptions', 'contains', 'spec'):
if subkey in opt:
suboptions.append((subkey, opt.pop(subkey)))
if not required and not return_values and 'default' not in opt:
opt['default'] = None
# sanitize config items
conf = {}
for config in ('env', 'ini', 'yaml', 'vars', 'keyword'):
if config in opt and opt[config]:
# Create a copy so we don't modify the original (in case YAML anchors have been used)
conf[config] = [dict(item) for item in opt.pop(config)]
for ignore in DocCLI.IGNORE:
for item in conf[config]:
if display.verbosity > 0 and 'version_added' in item:
                                item['added_in'] = DocCLI._format_version_added(item['version_added'], item.get('version_added_collection', 'ansible-core'))
if ignore in item:
del item[ignore]
            # reformat cli options
if 'cli' in opt and opt['cli']:
conf['cli'] = []
for cli in opt['cli']:
if 'option' not in cli:
conf['cli'].append({'name': cli['name'], 'option': '--%s' % cli['name'].replace('_', '-')})
else:
conf['cli'].append(cli)
del opt['cli']
# add custom header for conf
if conf:
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({'set_via': conf}), opt_indent))
# these we handle at the end of generic option processing
version_added = opt.pop('version_added', None)
version_added_collection = opt.pop('version_added_collection', None)
# general processing for options
for k in sorted(opt):
if k.startswith('_'):
continue
if is_sequence(opt[k]):
text.append(DocCLI._indent_lines('%s: %s' % (k, DocCLI._dump_yaml(opt[k], flow_style=True)), opt_indent))
else:
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k: opt[k]}), opt_indent))
if version_added and not man:
text.append("%sadded in: %s" % (opt_indent, DocCLI._format_version_added(version_added, version_added_collection)))
for subkey, subdata in suboptions:
text.append("%s%s:" % (opt_indent, subkey))
DocCLI.add_fields(text, subdata, limit, opt_indent + ' ', return_values, opt_indent)
def get_role_man_text(self, role, role_json):
"""Generate text for the supplied role suitable for display.
This is similar to get_man_text(), but roles are different enough that we have
a separate method for formatting their display.
:param role: The role name.
:param role_json: The JSON for the given role as returned from _create_role_doc().
        :returns: An array of text suitable for displaying to screen.
"""
text = []
opt_indent = " "
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
text.append("> ROLE: %s (%s)" % (_format(role, 'BOLD'), role_json.get('path')))
for entry_point in role_json['entry_points']:
doc = role_json['entry_points'][entry_point]
desc = ''
if doc.get('short_description'):
desc = "- %s" % (doc.get('short_description'))
text.append('')
text.append("ENTRY POINT: %s %s" % (_format(entry_point, "BOLD"), desc))
text.append('')
if doc.get('description'):
if isinstance(doc['description'], list):
descs = doc['description']
else:
descs = [doc['description']]
for desc in descs:
text.append("%s" % DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
text.append('')
if doc.get('options'):
text.append(_format("Options", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
if doc.get('attributes', False):
display.deprecated(
f'The role {role}\'s argument spec {entry_point} contains the key "attributes", '
'which will not be displayed by ansible-doc in the future. '
'This was unintentionally allowed when plugin attributes were added, '
'but the feature does not map well to role argument specs.',
version='2.20',
collection_name='ansible.builtin',
)
text.append("")
text.append(_format("ATTRIBUTES:", 'bold'))
for k in doc['attributes'].keys():
text.append('')
text.append(DocCLI.warp_fill(DocCLI.tty_ify(_format('%s:' % k, 'UNDERLINE')), limit - 6, initial_indent=opt_indent,
subsequent_indent=opt_indent))
text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc['attributes'][k]), opt_indent))
del doc['attributes']
# generic elements we will handle identically
for k in ('author',):
if k not in doc:
continue
text.append('')
if isinstance(doc[k], string_types):
text.append('%s: %s' % (k.upper(), DocCLI.warp_fill(DocCLI.tty_ify(doc[k]),
limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(doc[k], (list, tuple)):
text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
else:
                # use empty indent since this affects the start of the yaml doc, not its keys
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
if doc.get('examples', False):
text.append('')
text.append(_format("EXAMPLES:", 'bold'))
if isinstance(doc['examples'], string_types):
text.append(doc.pop('examples').strip())
else:
try:
text.append(yaml_dump(doc.pop('examples'), indent=2, default_flow_style=False))
except Exception as e:
raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
return text
@staticmethod
def get_man_text(doc, collection_name='', plugin_type=''):
# Create a copy so we don't modify the original
doc = dict(doc)
DocCLI.IGNORE = DocCLI.IGNORE + (context.CLIARGS['type'],)
opt_indent = " "
base_indent = " "
text = []
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
text.append("> %s %s (%s)" % (plugin_type.upper(), _format(doc.pop('plugin_name'), 'bold'), doc.pop('filename')))
if isinstance(doc['description'], list):
descs = doc.pop('description')
else:
descs = [doc.pop('description')]
text.append('')
for desc in descs:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=base_indent, subsequent_indent=base_indent))
if display.verbosity > 0:
doc['added_in'] = DocCLI._format_version_added(doc.pop('version_added', 'historical'), doc.pop('version_added_collection', 'ansible-core'))
if doc.get('deprecated', False):
text.append(_format("DEPRECATED: ", 'bold', 'DEP'))
if isinstance(doc['deprecated'], dict):
if 'removed_at_date' not in doc['deprecated'] and 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
doc['deprecated']['removed_in'] = doc['deprecated']['version']
try:
text.append('\t' + C.config.get_deprecated_msg_from_config(doc['deprecated'], True, collection_name=collection_name))
except KeyError as e:
raise AnsibleError("Invalid deprecation documentation structure", orig_exc=e)
else:
text.append("%s" % doc['deprecated'])
del doc['deprecated']
if doc.pop('has_action', False):
text.append("")
text.append(_format(" * note:", 'bold') + " This module has a corresponding action plugin.")
if doc.get('options', False):
text.append("")
text.append(_format("OPTIONS", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent, man=(display.verbosity == 0))
if doc.get('attributes', False):
text.append("")
text.append(_format("ATTRIBUTES:", 'bold'))
for k in doc['attributes'].keys():
text.append('')
text.append(DocCLI.warp_fill(DocCLI.tty_ify(_format('%s:' % k, 'UNDERLINE')), limit - 6, initial_indent=opt_indent,
subsequent_indent=opt_indent))
text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc['attributes'][k]), opt_indent))
del doc['attributes']
if doc.get('notes', False):
text.append("")
text.append(_format("NOTES:", 'bold'))
for note in doc['notes']:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(note), limit - 6,
initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
del doc['notes']
if doc.get('seealso', False):
text.append("")
text.append(_format("SEE ALSO:", 'bold'))
for item in doc['seealso']:
if 'module' in item:
text.append(DocCLI.warp_fill(DocCLI.tty_ify('Module %s' % item['module']),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
description = item.get('description')
if description is None and item['module'].startswith('ansible.builtin.'):
description = 'The official documentation on the %s module.' % item['module']
if description is not None:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(description),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
if item['module'].startswith('ansible.builtin.'):
relative_url = 'collections/%s_module.html' % item['module'].replace('.', '/', 2)
text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent))
elif 'plugin' in item and 'plugin_type' in item:
plugin_suffix = ' plugin' if item['plugin_type'] not in ('module', 'role') else ''
text.append(DocCLI.warp_fill(DocCLI.tty_ify('%s%s %s' % (item['plugin_type'].title(), plugin_suffix, item['plugin'])),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
description = item.get('description')
if description is None and item['plugin'].startswith('ansible.builtin.'):
description = 'The official documentation on the %s %s%s.' % (item['plugin'], item['plugin_type'], plugin_suffix)
if description is not None:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(description),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
if item['plugin'].startswith('ansible.builtin.'):
relative_url = 'collections/%s_%s.html' % (item['plugin'].replace('.', '/', 2), item['plugin_type'])
text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent))
elif 'name' in item and 'link' in item and 'description' in item:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['name']),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['description']),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['link']),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
elif 'ref' in item and 'description' in item:
text.append(DocCLI.warp_fill(DocCLI.tty_ify('Ansible documentation [%s]' % item['ref']),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['description']),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink('/#stq=%s&stp=1' % item['ref'])),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
del doc['seealso']
if doc.get('requirements', False):
text.append('')
req = ", ".join(doc.pop('requirements'))
text.append(_format("REQUIREMENTS:", 'bold') + "%s\n" % DocCLI.warp_fill(DocCLI.tty_ify(req), limit - 16, initial_indent=" ",
subsequent_indent=opt_indent))
# Generic handler
for k in sorted(doc):
if not doc[k] or k in DocCLI.IGNORE:
continue
text.append('')
header = _format(k.upper(), 'bold')
if isinstance(doc[k], string_types):
text.append('%s: %s' % (header, DocCLI.warp_fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(doc[k], (list, tuple)):
text.append('%s: %s' % (header, ', '.join(doc[k])))
else:
# use empty indent since this affects the start of the yaml doc, not its keys
text.append('%s: ' % header + DocCLI._indent_lines(DocCLI._dump_yaml(doc[k]), ' ' * (len(k) + 2)))
del doc[k]
if doc.get('plainexamples', False):
text.append('')
text.append(_format("EXAMPLES:", 'bold'))
if isinstance(doc['plainexamples'], string_types):
text.append(doc.pop('plainexamples').strip())
else:
try:
text.append(yaml_dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
except Exception as e:
raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
if doc.get('returndocs', False):
text.append('')
text.append(_format("RETURN VALUES:", 'bold'))
DocCLI.add_fields(text, doc.pop('returndocs'), limit, opt_indent, return_values=True, man=(display.verbosity == 0))
text.append('\n')
return "\n".join(text)
def _do_yaml_snippet(doc):
text = []
mdesc = DocCLI.tty_ify(doc['short_description'])
module = doc.get('module')
if module:
# this is actually a usable task!
text.append("- name: %s" % (mdesc))
text.append(" %s:" % (module))
else:
# just a comment, hopefully useful yaml file
text.append("# %s:" % doc.get('plugin', doc.get('name')))
pad = 29
subdent = '# '.rjust(pad + 2)
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
if isinstance(opt['description'], string_types):
desc = DocCLI.tty_ify(opt['description'])
else:
desc = DocCLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise ValueError("Incorrect value for 'Required', a boolean is needed: %s" % required)
o = '%s:' % o
if module:
if required:
desc = "(required) %s" % desc
text.append(" %-20s # %s" % (o, DocCLI.warp_fill(desc, limit, subsequent_indent=subdent)))
else:
if required:
default = '(required)'
else:
default = opt.get('default', 'None')
text.append("%s %-9s # %s" % (o, default, DocCLI.warp_fill(desc, limit, subsequent_indent=subdent, max_lines=3)))
return text
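# Illustrative sketch (module, option and description assumed): for a module doc
# with a required 'path' option, the task snippet emitted above looks roughly like:
#
#   - name: Manage files and file properties
#     file:
#       path:                # (required) Path of the file being managed.
#
# The "%-20s" padding aligns the option names; descriptions are wrapped via warp_fill().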
def _do_lookup_snippet(doc):
text = []
snippet = "lookup('%s', " % doc.get('plugin', doc.get('name'))
comment = []
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
comment.append('# %s(%s): %s' % (o, opt.get('type', 'string'), opt.get('description', '')))
if o in ('_terms', '_raw', '_list'):
# these are 'list of arguments'
snippet += '< %s >' % (o)
continue
required = opt.get('required', False)
if not isinstance(required, bool):
raise ValueError("Incorrect value for 'Required', a boolean is needed: %s" % required)
if required:
default = '<REQUIRED>'
else:
default = opt.get('default', 'None')
if opt.get('type') in ('string', 'str'):
snippet += ", %s='%s'" % (o, default)
else:
snippet += ', %s=%s' % (o, default)
snippet += ")"
if comment:
text.extend(comment)
text.append('')
text.append(snippet)
return text
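# Illustrative sketch (plugin and options assumed): for a lookup plugin 'env'
# with a '_terms' argument and a string 'default' option, the emitted snippet
# resembles:
#
#   # _terms(string): Environment variable name(s) to look up.
#   # default(string): What to return if the variable is undefined.
#
#   lookup('env', < _terms >, default='None')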
def main(args=None):
DocCLI.cli_executor(args)
if __name__ == '__main__':
main()
| 70,451 | Python | .py | 1,318 | 39.861912 | 156 | 0.557502 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,322 | inventory.py | ansible_ansible/lib/ansible/cli/inventory.py |
#!/usr/bin/env python
# Copyright: (c) 2017, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import sys
import argparse
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.utils.vars import combine_vars
from ansible.utils.display import Display
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
display = Display()
class InventoryCLI(CLI):
""" used to display or dump the configured inventory as Ansible sees it """
name = 'ansible-inventory'
ARGUMENTS = {'group': 'The name of a group in the inventory, relevant when using --graph', }
def __init__(self, args):
super(InventoryCLI, self).__init__(args)
self.vm = None
self.loader = None
self.inventory = None
def init_parser(self):
super(InventoryCLI, self).init_parser(
usage='usage: %prog [options] [group]',
desc='Show Ansible inventory information; by default it uses the inventory script JSON format')
opt_help.add_inventory_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_basedir_options(self.parser)
opt_help.add_runtask_options(self.parser)
# remove unused default options
self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
self.parser.add_argument('args', metavar='group', nargs='?', help='The name of a group in the inventory, relevant when using --graph')
# Actions
action_group = self.parser.add_argument_group("Actions", "One of the following must be used on invocation, ONLY ONE!")
action_group.add_argument("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
action_group.add_argument("--host", action="store", default=None, dest='host',
help='Output specific host info, works as inventory script. It will ignore limit')
action_group.add_argument("--graph", action="store_true", default=False, dest='graph',
help='create inventory graph, if supplying pattern it must be a valid group name. It will ignore limit')
self.parser.add_argument_group(action_group)
# graph
self.parser.add_argument("-y", "--yaml", action="store_true", default=False, dest='yaml',
help='Use YAML format instead of default JSON, ignored for --graph')
self.parser.add_argument('--toml', action='store_true', default=False, dest='toml',
help='Use TOML format instead of default JSON, ignored for --graph')
self.parser.add_argument("--vars", action="store_true", default=False, dest='show_vars',
help='Add vars to graph display, ignored unless used with --graph')
# list
self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
help="When doing --list, represent in a way that is optimized for export,"
"not as an accurate representation of how Ansible has processed it")
self.parser.add_argument('--output', default=None, dest='output_file',
help="When doing --list, send the inventory to a file instead of to the screen")
# self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
# help="When doing --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
def post_process_args(self, options):
options = super(InventoryCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options)
# there can be only one! and, at least, one!
used = 0
for opt in (options.list, options.host, options.graph):
if opt:
used += 1
if used == 0:
raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
elif used > 1:
raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
# set host pattern to default if not supplied
if options.args:
options.pattern = options.args
else:
options.pattern = 'all'
return options
def run(self):
super(InventoryCLI, self).run()
# Initialize needed objects
self.loader, self.inventory, self.vm = self._play_prereqs()
results = None
if context.CLIARGS['host']:
hosts = self.inventory.get_hosts(context.CLIARGS['host'])
if len(hosts) != 1:
raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
myvars = self._get_host_variables(host=hosts[0])
# FIXME: should we template first?
results = self.dump(myvars)
else:
if context.CLIARGS['subset']:
# not doing single host, set limit in general if given
self.inventory.subset(context.CLIARGS['subset'])
if context.CLIARGS['graph']:
results = self.inventory_graph()
elif context.CLIARGS['list']:
top = self._get_group('all')
if context.CLIARGS['yaml']:
results = self.yaml_inventory(top)
elif context.CLIARGS['toml']:
results = self.toml_inventory(top)
else:
results = self.json_inventory(top)
results = self.dump(results)
if results:
outfile = context.CLIARGS['output_file']
if outfile is None:
# FIXME: pager?
display.display(results)
else:
try:
with open(to_bytes(outfile), 'wb') as f:
f.write(to_bytes(results))
except (OSError, IOError) as e:
raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
sys.exit(0)
sys.exit(1)
@staticmethod
def dump(stuff):
if context.CLIARGS['yaml']:
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
results = to_text(yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False, allow_unicode=True))
elif context.CLIARGS['toml']:
from ansible.plugins.inventory.toml import toml_dumps
try:
results = toml_dumps(stuff)
except TypeError as e:
raise AnsibleError(
'The source inventory contains a value that cannot be represented in TOML: %s' % e
)
except KeyError as e:
raise AnsibleError(
'The source inventory contains a non-string key (%s) which cannot be represented in TOML. '
'The specified key will need to be converted to a string. Be aware that if your playbooks '
'expect this key to be non-string, your playbooks will need to be modified to support this '
'change.' % e.args[0]
)
else:
import json
from ansible.parsing.ajson import AnsibleJSONEncoder
try:
results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True, ensure_ascii=False)
except TypeError as e:
results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=False, indent=4, preprocess_unsafe=True, ensure_ascii=False)
display.warning("Could not sort JSON output due to issues while sorting keys: %s" % to_native(e))
return results
def _get_group_variables(self, group):
# get info from inventory source
res = group.get_vars()
# Always load vars plugins
res = combine_vars(res, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [group], 'all'))
if context.CLIARGS['basedir']:
res = combine_vars(res, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [group], 'all'))
if group.priority != 1:
res['ansible_group_priority'] = group.priority
return self._remove_internal(res)
def _get_host_variables(self, host):
if context.CLIARGS['export']:
# only get vars defined directly on the host
hostvars = host.get_vars()
# Always load vars plugins
hostvars = combine_vars(hostvars, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [host], 'all'))
if context.CLIARGS['basedir']:
hostvars = combine_vars(hostvars, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [host], 'all'))
else:
# get all vars flattened by host, but skip magic hostvars
hostvars = self.vm.get_vars(host=host, include_hostvars=False, stage='all')
return self._remove_internal(hostvars)
def _get_group(self, gname):
group = self.inventory.groups.get(gname)
return group
@staticmethod
def _remove_internal(dump):
for internal in C.INTERNAL_STATIC_VARS:
if internal in dump:
del dump[internal]
return dump
@staticmethod
def _remove_empty_keys(dump):
# remove empty keys
for x in ('hosts', 'vars', 'children'):
if x in dump and not dump[x]:
del dump[x]
@staticmethod
def _show_vars(dump, depth):
result = []
for (name, val) in sorted(dump.items()):
result.append(InventoryCLI._graph_name('{%s = %s}' % (name, val), depth))
return result
@staticmethod
def _graph_name(name, depth=0):
if depth:
name = " |" * (depth) + "--%s" % name
return name
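# Illustrative: _graph_name('host1', 2) renders as " | |--host1", i.e. one " |"
# marker per nesting level before the "--name" stem.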
def _graph_group(self, group, depth=0):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
for kid in group.child_groups:
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
for host in group.hosts:
result.append(self._graph_name(host.name, depth))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_group_variables(group), depth))
return result
def inventory_graph(self):
start_at = self._get_group(context.CLIARGS['pattern'])
if start_at:
return '\n'.join(self._graph_group(start_at))
else:
raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
def json_inventory(self, top):
seen_groups = set()
def format_group(group, available_hosts):
results = {}
results[group.name] = {}
if group.name != 'all':
results[group.name]['hosts'] = [h.name for h in group.hosts if h.name in available_hosts]
results[group.name]['children'] = []
for subgroup in group.child_groups:
results[group.name]['children'].append(subgroup.name)
if subgroup.name not in seen_groups:
results.update(format_group(subgroup, available_hosts))
seen_groups.add(subgroup.name)
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty_keys(results[group.name])
# remove empty groups
if not results[group.name]:
del results[group.name]
return results
hosts = self.inventory.get_hosts(top.name)
results = format_group(top, frozenset(h.name for h in hosts))
# populate meta
results['_meta'] = {'hostvars': {}}
for host in hosts:
hvars = self._get_host_variables(host)
if hvars:
results['_meta']['hostvars'][host.name] = hvars
return results
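# Illustrative shape of the --list JSON output (host and group names assumed):
#
#   {
#       "_meta": {"hostvars": {"web1": {"ansible_host": "10.0.0.5"}}},
#       "all": {"children": ["ungrouped", "webservers"]},
#       "webservers": {"hosts": ["web1"]}
#   }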
def yaml_inventory(self, top):
seen_hosts = set()
seen_groups = set()
def format_group(group, available_hosts):
results = {}
# initialize group + vars
results[group.name] = {}
# subgroups
results[group.name]['children'] = {}
for subgroup in group.child_groups:
if subgroup.name != 'all':
if subgroup.name in seen_groups:
results[group.name]['children'].update({subgroup.name: {}})
else:
results[group.name]['children'].update(format_group(subgroup, available_hosts))
seen_groups.add(subgroup.name)
# hosts for group
results[group.name]['hosts'] = {}
if group.name != 'all':
for h in group.hosts:
if h.name not in available_hosts:
continue # observe limit
myvars = {}
if h.name not in seen_hosts: # avoid defining host vars more than once
seen_hosts.add(h.name)
myvars = self._get_host_variables(host=h)
results[group.name]['hosts'][h.name] = myvars
if context.CLIARGS['export']:
gvars = self._get_group_variables(group)
if gvars:
results[group.name]['vars'] = gvars
self._remove_empty_keys(results[group.name])
# remove empty groups
if not results[group.name]:
del results[group.name]
return results
available_hosts = frozenset(h.name for h in self.inventory.get_hosts(top.name))
return format_group(top, available_hosts)
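# Illustrative shape of the --list --yaml output (names assumed); host vars are
# inlined under each host the first time it is seen:
#
#   all:
#     children:
#       webservers:
#         hosts:
#           web1:
#             ansible_host: 10.0.0.5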
def toml_inventory(self, top):
seen_hosts = set()
has_ungrouped = bool(next(g.hosts for g in top.child_groups if g.name == 'ungrouped'))
def format_group(group, available_hosts):
results = {}
results[group.name] = {}
results[group.name]['children'] = []
for subgroup in group.child_groups:
if subgroup.name == 'ungrouped' and not has_ungrouped:
continue
if group.name != 'all':
results[group.name]['children'].append(subgroup.name)
results.update(format_group(subgroup, available_hosts))
if group.name != 'all':
for host in group.hosts:
if host.name not in available_hosts:
continue
if host.name not in seen_hosts:
seen_hosts.add(host.name)
host_vars = self._get_host_variables(host=host)
else:
host_vars = {}
try:
results[group.name]['hosts'][host.name] = host_vars
except KeyError:
results[group.name]['hosts'] = {host.name: host_vars}
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty_keys(results[group.name])
# remove empty groups
if not results[group.name]:
del results[group.name]
return results
available_hosts = frozenset(h.name for h in self.inventory.get_hosts(top.name))
results = format_group(top, available_hosts)
return results
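# Illustrative shape of the --list --toml output (names assumed):
#
#   [webservers.hosts.web1]
#   ansible_host = "10.0.0.5"
#
# TOML requires string keys, hence the KeyError guard in dump() above.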
def main(args=None):
InventoryCLI.cli_executor(args)
if __name__ == '__main__':
main()
| 16,765 | Python | .py | 326 | 38.641104 | 153 | 0.588207 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,323 | playbook.py | ansible_ansible/lib/ansible/cli/playbook.py |
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import os
import stat
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.module_utils.common.text.converters import to_bytes
from ansible.playbook.block import Block
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
from ansible.utils.display import Display
display = Display()
class PlaybookCLI(CLI):
""" the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
See the project home page (https://docs.ansible.com) for more information. """
name = 'ansible-playbook'
def init_parser(self):
# create parser for CLI options
super(PlaybookCLI, self).init_parser(
usage="%prog [options] playbook.yml [playbook2 ...]",
desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.")
opt_help.add_connect_options(self.parser)
opt_help.add_meta_options(self.parser)
opt_help.add_runas_options(self.parser)
opt_help.add_subset_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
# ansible playbook specific opts
self.parser.add_argument('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
self.parser.add_argument('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
self.parser.add_argument('--list-tags', dest='listtags', action='store_true',
help="list all available tags")
self.parser.add_argument('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
self.parser.add_argument('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
self.parser.add_argument('args', help='Playbook(s)', metavar='playbook', nargs='+')
def post_process_args(self, options):
# for listing, we need to know if user had tag input
# capture here as parent function sets defaults for tags
havetags = bool(options.tags or options.skip_tags)
options = super(PlaybookCLI, self).post_process_args(options)
if options.listtags:
# default to all tags (including never), when listing tags
# unless user specified tags
if not havetags:
options.tags = ['never', 'all']
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, fork_opts=True)
return options
def run(self):
super(PlaybookCLI, self).run()
# Note: slightly wrong, this is written so that implicit localhost
# manages passwords
sshpass = None
becomepass = None
passwords = {}
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
# also prep plugin paths
b_playbook_dirs = []
for playbook in context.CLIARGS['args']:
# resolve if it is collection playbook with FQCN notation, if not, leaves unchanged
resource = _get_collection_playbook_path(playbook)
if resource is not None:
playbook_collection = resource[2]
else:
# not an FQCN so must be a file
if not os.path.exists(playbook):
raise AnsibleError("the playbook: %s could not be found" % playbook)
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
# check if playbook is from collection (path can be passed directly)
playbook_collection = _get_collection_name_from_path(playbook)
# don't add collection playbooks to adjacency search path
if not playbook_collection:
# setup dirs to enable loading plugins from all playbooks in case they add callbacks/inventory/etc
b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
add_all_plugin_dirs(b_playbook_dir)
b_playbook_dirs.append(b_playbook_dir)
if b_playbook_dirs:
# allow collections adjacent to these playbooks
# we use list copy to avoid opening up 'adjacency' in the previous loop
AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
# don't deal with privilege escalation or passwords when we don't need to
if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
# create base objects
loader, inventory, variable_manager = self._play_prereqs()
# Note: the implicit localhost (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
# limit if only implicit localhost was in inventory to start with.
#
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
CLI.get_host_list(inventory, context.CLIARGS['subset'])
# flush fact cache if requested
if context.CLIARGS['flush_cache']:
self._flush_cache(inventory, variable_manager)
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
variable_manager=variable_manager, loader=loader,
passwords=passwords)
results = pbex.run()
if isinstance(results, list):
for p in results:
display.display('\nplaybook: %s' % p['playbook'])
for idx, play in enumerate(p['plays']):
if play._included_path is not None:
loader.set_basedir(play._included_path)
else:
pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
loader.set_basedir(pb_dir)
# show host list if we were able to template into a list
try:
host_list = ','.join(play.hosts)
except TypeError:
host_list = ''
msg = "\n play #%d (%s): %s" % (idx + 1, host_list, play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))
if context.CLIARGS['listhosts']:
playhosts = set(inventory.get_hosts(play.hosts))
msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
for host in playhosts:
msg += "\n %s" % host
display.display(msg)
all_tags = set()
if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
taskmsg = ''
if context.CLIARGS['listtasks']:
taskmsg = ' tasks:\n'
def _process_block(b):
taskmsg = ''
for task in b.block:
if isinstance(task, Block):
taskmsg += _process_block(task)
else:
if task.action in C._ACTION_META and task.implicit:
continue
all_tags.update(task.tags)
if context.CLIARGS['listtasks']:
cur_tags = list(mytags.union(set(task.tags)))
cur_tags.sort()
if task.name:
taskmsg += " %s" % task.get_name()
else:
taskmsg += " %s" % task.action
taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
return taskmsg
all_vars = variable_manager.get_vars(play=play)
for block in play.compile():
block = block.filter_tagged_tasks(all_vars)
if not block.has_tasks():
continue
taskmsg += _process_block(block)
if context.CLIARGS['listtags']:
cur_tags = list(mytags.union(all_tags))
cur_tags.sort()
taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
display.display(taskmsg)
return 0
else:
return results
@staticmethod
def _flush_cache(inventory, variable_manager):
for host in inventory.list_hosts():
hostname = host.get_name()
variable_manager.clear_facts(hostname)
def main(args=None):
PlaybookCLI.cli_executor(args)
if __name__ == '__main__':
main()
| 10,865 | Python | .py | 191 | 41.26178 | 124 | 0.572585 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,324 | option_helpers.py | ansible_ansible/lib/ansible/cli/arguments/option_helpers.py |
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import copy
import operator
import argparse
import os
import os.path
import sys
import time
from jinja2 import __version__ as j2_version
import ansible
from ansible import constants as C
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.yaml import HAS_LIBYAML, yaml_load
from ansible.release import __version__
from ansible.utils.path import unfrackpath
#
# Special purpose OptionParsers
#
class SortingHelpFormatter(argparse.HelpFormatter):
def add_arguments(self, actions):
actions = sorted(actions, key=operator.attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
class ArgumentParser(argparse.ArgumentParser):
def add_argument(self, *args, **kwargs):
action = kwargs.get('action')
help = kwargs.get('help')
if help and action in {'append', 'append_const', 'count', 'extend', PrependListAction}:
help = f'{help.rstrip(".")}. This argument may be specified multiple times.'
kwargs['help'] = help
return super().add_argument(*args, **kwargs)
class AnsibleVersion(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ansible_version = to_native(version(getattr(parser, 'prog')))
print(ansible_version)
parser.exit()
class UnrecognizedArgument(argparse.Action):
def __init__(self, option_strings, dest, const=True, default=None, required=False, help=None, metavar=None, nargs=0):
super(UnrecognizedArgument, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const,
default=default, required=required, help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.error('unrecognized arguments: %s' % option_string)
class PrependListAction(argparse.Action):
"""A near clone of ``argparse._AppendAction``, but designed to prepend list values
instead of appending.
"""
def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None,
choices=None, required=False, help=None, metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != argparse.OPTIONAL:
raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
super(PrependListAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar
)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(ensure_value(namespace, self.dest, []))
items[0:0] = values
setattr(namespace, self.dest, items)
def ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
#
# Callbacks to validate and normalize Options
#
def unfrack_path(pathsep=False, follow=True):
"""Turn an Option's data into a single path in Ansible locations"""
def inner(value):
if pathsep:
return [unfrackpath(x, follow=follow) for x in value.split(os.pathsep) if x]
if value == '-':
return value
return unfrackpath(value, follow=follow)
return inner
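# Illustrative usage (paths assumed): unfrack_path()('~/inv.yml') resolves to
# something like '/home/user/inv.yml'; unfrack_path(pathsep=True) splits 'a:b'
# on os.pathsep and normalizes each entry; '-' is passed through for stdin.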
def maybe_unfrack_path(beacon):
def inner(value):
if value.startswith(beacon):
return beacon + unfrackpath(value[1:])
return value
return inner
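# Illustrative (paths assumed): maybe_unfrack_path('@')('@vars.yml') expands
# only values starting with the beacon, returning e.g. '@/abs/path/to/vars.yml';
# other values pass through unchanged. Used by -e/--extra-vars for '@file' args.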
def _git_repo_info(repo_path):
""" returns a string containing git branch, commit id and commit date """
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
with open(repo_path) as f:
gitdir = yaml_load(f).get('gitdir')
# The gitdir value read from the .git file may be an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
with open(os.path.join(repo_path, "HEAD")) as f:
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
with open(branch_path) as f:
commit = f.readline()[:10]
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
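# Illustrative return value (branch, commit and date assumed):
#   "(devel 1a2b3c4d5e) last updated 2024/09/05 17:11:58 (GMT +200)"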
def _gitinfo():
basedir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
repo_path = os.path.join(basedir, '.git')
return _git_repo_info(repo_path)
def version(prog=None):
""" return ansible version """
if prog:
result = ["{0} [core {1}]".format(prog, __version__)]
else:
result = [__version__]
gitinfo = _gitinfo()
if gitinfo:
result[0] = "{0} {1}".format(result[0], gitinfo)
result.append(" config file = %s" % C.CONFIG_FILE)
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result.append(" configured module search path = %s" % cpath)
result.append(" ansible python module location = %s" % ':'.join(ansible.__path__))
result.append(" ansible collection location = %s" % ':'.join(C.COLLECTIONS_PATHS))
result.append(" executable location = %s" % sys.argv[0])
result.append(" python version = %s (%s)" % (''.join(sys.version.splitlines()), to_native(sys.executable)))
result.append(" jinja version = %s" % j2_version)
result.append(" libyaml = %s" % HAS_LIBYAML)
return "\n".join(result)
#
# Functions to add pre-canned options to an OptionParser
#
def create_base_parser(prog, usage="", desc=None, epilog=None):
"""
Create an options parser for all ansible scripts
"""
# base opts
parser = ArgumentParser(
prog=prog,
formatter_class=SortingHelpFormatter,
epilog=epilog,
description=desc,
conflict_handler='resolve',
)
version_help = "show program's version number, config file location, configured module search path," \
" module location, executable location and exit"
parser.add_argument('--version', action=AnsibleVersion, nargs=0, help=version_help)
add_verbosity_options(parser)
return parser
def add_verbosity_options(parser):
"""Add options for verbosity"""
parser.add_argument('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="Causes Ansible to print more debug messages. Adding multiple -v will increase the verbosity, "
"the builtin plugins currently evaluate up to -vvvvvv. A reasonable level to start is -vvv, "
"connection debugging might require -vvvv.")
def add_async_options(parser):
"""Add options for commands which can launch async tasks"""
parser.add_argument('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type=int, dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_argument('-B', '--background', dest='seconds', type=int, default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
def add_basedir_options(parser):
"""Add options for commands which can set a playbook basedir"""
parser.add_argument('--playbook-dir', default=C.PLAYBOOK_DIR, dest='basedir', action='store',
help="Since this tool does not use playbooks, use this as a substitute playbook directory. "
"This sets the relative path for many features including roles/ group_vars/ etc.",
type=unfrack_path())
def add_check_options(parser):
"""Add options for commands which can run with diagnostic information of tasks"""
parser.add_argument("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_argument("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those"
" files; works great with --check")
def add_connect_options(parser):
"""Add options for commands which need to connection to other hosts"""
connect_group = parser.add_argument_group("Connection Options", "control as whom and how to connect to hosts")
connect_group.add_argument('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', type=unfrack_path())
connect_group.add_argument('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_argument('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_argument('-T', '--timeout', default=None, type=int, dest='timeout',
help="override the connection timeout in seconds (default depends on connection)")
# ssh only
connect_group.add_argument('--ssh-common-args', default=None, dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_argument('--sftp-extra-args', default=None, dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_argument('--scp-extra-args', default=None, dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_argument('--ssh-extra-args', default=None, dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_argument_group(connect_group)
connect_password_group = parser.add_mutually_exclusive_group()
connect_password_group.add_argument('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_password_group.add_argument('--connection-password-file', '--conn-pass-file', default=C.CONNECTION_PASSWORD_FILE, dest='connection_password_file',
help="Connection password file", type=unfrack_path(), action='store')
parser.add_argument_group(connect_password_group)
def add_fork_options(parser):
"""Add options for commands that can fork worker processes"""
parser.add_argument('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type=int,
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
def add_inventory_options(parser):
"""Add options for commands that utilize inventory"""
parser.add_argument('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
parser.add_argument('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_argument('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
def add_meta_options(parser):
"""Add options for commands which can launch meta tasks from the command line"""
parser.add_argument('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_argument('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache for every host in inventory")
def add_module_options(parser):
"""Add options for commands that load modules"""
module_path = C.config.get_configuration_definition('DEFAULT_MODULE_PATH').get('default', '')
parser.add_argument('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % module_path,
type=unfrack_path(pathsep=True), action=PrependListAction)
def add_output_options(parser):
"""Add options for commands which can change their output"""
parser.add_argument('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_argument('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
def add_runas_options(parser):
"""
Add options for commands which can run tasks as another user
Note that this includes the options from add_runas_prompt_options(). Only one of these
functions should be used.
"""
runas_group = parser.add_argument_group("Privilege Escalation Options", "control how and which user you become as on target hosts")
# consolidated privilege escalation (become)
runas_group.add_argument("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_argument('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD,
help='privilege escalation method to use (default=%s)' % C.DEFAULT_BECOME_METHOD +
', use `ansible-doc -t become -l` to list valid choices.')
runas_group.add_argument('--become-user', default=None, dest='become_user', type=str,
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
parser.add_argument_group(runas_group)
add_runas_prompt_options(parser)
def add_runas_prompt_options(parser, runas_group=None):
"""
Add options for commands which need to prompt for privilege escalation credentials
Note that add_runas_options() includes these options already. Only one of the two functions
should be used.
"""
if runas_group is not None:
parser.add_argument_group(runas_group)
runas_pass_group = parser.add_mutually_exclusive_group()
runas_pass_group.add_argument('-K', '--ask-become-pass', dest='become_ask_pass', action='store_true',
default=C.DEFAULT_BECOME_ASK_PASS,
help='ask for privilege escalation password')
runas_pass_group.add_argument('--become-password-file', '--become-pass-file', default=C.BECOME_PASSWORD_FILE, dest='become_password_file',
help="Become password file", type=unfrack_path(), action='store')
parser.add_argument_group(runas_pass_group)
def add_runtask_options(parser):
"""Add options for commands that run a task"""
parser.add_argument('-e', '--extra-vars', dest="extra_vars", action="append", type=maybe_unfrack_path('@'),
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
def add_tasknoplay_options(parser):
"""Add options for commands that run a task w/o a defined play"""
parser.add_argument('--task-timeout', type=int, dest="task_timeout", action="store", default=C.TASK_TIMEOUT,
help="set task timeout limit in seconds, must be positive integer.")
def add_subset_options(parser):
"""Add options for commands which can run a subset of tasks"""
parser.add_argument('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
help="only run plays and tasks tagged with these values")
parser.add_argument('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
help="only run plays and tasks whose tags do not match these values")
def add_vault_options(parser):
"""Add options for loading vault files"""
parser.add_argument('--vault-id', default=[], dest='vault_ids', action='append', type=str,
help='the vault identity to use')
base_group = parser.add_mutually_exclusive_group()
base_group.add_argument('-J', '--ask-vault-password', '--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
base_group.add_argument('--vault-password-file', '--vault-pass-file', default=[], dest='vault_password_files',
help="vault password file", type=unfrack_path(follow=False), action='append')
| 18,488 | Python | .py | 316 | 48.373418 | 158 | 0.6377 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,325 | __init__.py | ansible_ansible/lib/ansible/cli/arguments/__init__.py |
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
| 168 | Python | .py | 3 | 54.666667 | 92 | 0.75 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,326 | ansible_connection_cli_stub.py | ansible_ansible/lib/ansible/cli/scripts/ansible_connection_cli_stub.py |
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import fcntl
import io
import os
import pickle
import signal
import socket
import sys
import time
import traceback
import errno
import json
from contextlib import contextmanager
from ansible import constants as C
from ansible.cli.arguments import option_helpers as opt_help
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.connection import Connection, ConnectionError, send_data, recv_data
from ansible.module_utils.service import fork_process
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader, init_plugin_loader
from ansible.utils.path import unfrackpath, makedirs_safe
from ansible.utils.display import Display
from ansible.utils.jsonrpc import JsonRpcServer
display = Display()
def read_stream(byte_stream):
size = int(byte_stream.readline().strip())
data = byte_stream.read(size)
if len(data) < size:
raise Exception("EOF found before data was complete")
return data
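# Illustrative wire format (payload assumed): each frame is a decimal length on
# its own line followed by exactly that many bytes, e.g. b"5\nhello".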
@contextmanager
def file_lock(lock_path):
"""
Uses contextmanager to create and release a file lock based on the
given path. This allows us to create locks using `with file_lock()`
to prevent deadlocks related to failure to unlock properly.
"""
lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT, 0o600)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
yield
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
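# Illustrative usage (lock path assumed):
#
#   with file_lock('/tmp/.ansible_pc_lock_example'):
#       ...  # critical section; the flock is released on exit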
class ConnectionProcess(object):
"""
The connection process wraps around a Connection object that manages
the connection to a remote device that persists over the playbook
"""
def __init__(self, fd, play_context, socket_path, original_path, task_uuid=None, ansible_playbook_pid=None):
self.play_context = play_context
self.socket_path = socket_path
self.original_path = original_path
self._task_uuid = task_uuid
self.fd = fd
self.exception = None
self.srv = JsonRpcServer()
self.sock = None
self.connection = None
self._ansible_playbook_pid = ansible_playbook_pid
def start(self, options):
messages = list()
result = {}
try:
messages.append(('vvvv', 'control socket path is %s' % self.socket_path))
# If this is a relative path (~ gets expanded later) then plug the
# key's path on to the directory we originally came from, so we can
# find it now that our cwd is /
if self.play_context.private_key_file and self.play_context.private_key_file[0] not in '~/':
self.play_context.private_key_file = os.path.join(self.original_path, self.play_context.private_key_file)
self.connection = connection_loader.get(self.play_context.connection, self.play_context, '/dev/null',
task_uuid=self._task_uuid, ansible_playbook_pid=self._ansible_playbook_pid)
try:
self.connection.set_options(direct=options)
except ConnectionError as exc:
messages.append(('debug', to_text(exc)))
raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
self.connection._socket_path = self.socket_path
self.srv.register(self.connection)
messages.extend([('vvvv', msg) for msg in sys.stdout.getvalue().splitlines()])
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.bind(self.socket_path)
self.sock.listen(1)
messages.append(('vvvv', 'local domain socket listeners started successfully'))
except Exception as exc:
messages.extend(self.connection.pop_messages())
result['error'] = to_text(exc)
result['exception'] = traceback.format_exc()
finally:
result['messages'] = messages
self.fd.write(json.dumps(result, cls=AnsibleJSONEncoder))
self.fd.close()
def run(self):
try:
log_messages = self.connection.get_option('persistent_log_messages')
while not self.connection._conn_closed:
signal.signal(signal.SIGALRM, self.connect_timeout)
signal.signal(signal.SIGTERM, self.handler)
signal.alarm(self.connection.get_option('persistent_connect_timeout'))
self.exception = None
(s, addr) = self.sock.accept()
signal.alarm(0)
signal.signal(signal.SIGALRM, self.command_timeout)
while True:
data = recv_data(s)
if not data:
break
if log_messages:
display.display("jsonrpc request: %s" % data, log_only=True)
request = json.loads(to_text(data, errors='surrogate_or_strict'))
if request.get('method') == "exec_command" and not self.connection.connected:
self.connection._connect()
signal.alarm(self.connection.get_option('persistent_command_timeout'))
resp = self.srv.handle_request(data)
signal.alarm(0)
if log_messages:
display.display("jsonrpc response: %s" % resp, log_only=True)
send_data(s, to_bytes(resp))
s.close()
except Exception as e:
# socket.accept() will raise EINTR if the socket.close() is called
if hasattr(e, 'errno'):
if e.errno != errno.EINTR:
self.exception = traceback.format_exc()
else:
self.exception = traceback.format_exc()
finally:
# allow time for any exception msg send over socket to receive at other end before shutting down
time.sleep(0.1)
# when done, close the connection properly and cleanup the socket file so it can be recreated
self.shutdown()
def connect_timeout(self, signum, frame):
msg = 'persistent connection idle timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and ' \
'Troubleshooting Guide.' % self.connection.get_option('persistent_connect_timeout')
display.display(msg, log_only=True)
raise Exception(msg)
def command_timeout(self, signum, frame):
msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\
% self.connection.get_option('persistent_command_timeout')
display.display(msg, log_only=True)
raise Exception(msg)
def handler(self, signum, frame):
msg = 'signal handler called with signal %s.' % signum
display.display(msg, log_only=True)
raise Exception(msg)
def shutdown(self):
""" Shuts down the local domain socket
"""
lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(self.socket_path))
if os.path.exists(self.socket_path):
try:
if self.sock:
self.sock.close()
if self.connection:
self.connection.close()
if self.connection.get_option("persistent_log_messages"):
for _level, message in self.connection.pop_messages():
display.display(message, log_only=True)
except Exception:
pass
finally:
if os.path.exists(self.socket_path):
os.remove(self.socket_path)
setattr(self.connection, '_socket_path', None)
setattr(self.connection, '_connected', False)
if os.path.exists(lock_path):
os.remove(lock_path)
display.display('shutdown complete', log_only=True)
def main(args=None):
""" Called to initiate the connect to the remote device
"""
parser = opt_help.create_base_parser(prog=None)
opt_help.add_verbosity_options(parser)
parser.add_argument('playbook_pid')
parser.add_argument('task_uuid')
args = parser.parse_args(args[1:] if args is not None else args)
init_plugin_loader()
# initialize verbosity
display.verbosity = args.verbosity
rc = 0
result = {}
messages = list()
socket_path = None
# Need stdin as a byte stream
stdin = sys.stdin.buffer
# Note: update the below log capture code after Display.display() is refactored.
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
# read the play context data via stdin, which means depickling it
opts_data = read_stream(stdin)
init_data = read_stream(stdin)
pc_data = pickle.loads(init_data, encoding='bytes')
options = pickle.loads(opts_data, encoding='bytes')
play_context = PlayContext()
play_context.deserialize(pc_data)
except Exception as e:
rc = 1
result.update({
'error': to_text(e),
'exception': traceback.format_exc()
})
if rc == 0:
ssh = connection_loader.get('ssh', class_only=True)
ansible_playbook_pid = args.playbook_pid
task_uuid = args.task_uuid
cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)
# create the persistent connection dir if need be and create the paths
# which we will be using later
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
makedirs_safe(tmp_path)
socket_path = unfrackpath(cp % dict(directory=tmp_path))
lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(socket_path))
with file_lock(lock_path):
if not os.path.exists(socket_path):
messages.append(('vvvv', 'local domain socket does not exist, starting it'))
original_path = os.getcwd()
r, w = os.pipe()
pid = fork_process()
if pid == 0:
try:
os.close(r)
wfd = os.fdopen(w, 'w')
process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, ansible_playbook_pid)
process.start(options)
except Exception:
messages.append(('error', traceback.format_exc()))
rc = 1
if rc == 0:
process.run()
else:
process.shutdown()
sys.exit(rc)
else:
os.close(w)
rfd = os.fdopen(r, 'r')
data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
messages.extend(data.pop('messages'))
result.update(data)
else:
messages.append(('vvvv', 'found existing local domain socket, using it!'))
conn = Connection(socket_path)
try:
conn.set_options(direct=options)
except ConnectionError as exc:
messages.append(('debug', to_text(exc)))
raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
pc_data = to_text(init_data)
try:
conn.update_play_context(pc_data)
conn.set_check_prompt(task_uuid)
except Exception as exc:
# Only network_cli has update_play_context and set_check_prompt, so missing this is
# not fatal e.g. netconf
if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
pass
else:
result.update({
'error': to_text(exc),
'exception': traceback.format_exc()
})
if os.path.exists(socket_path):
messages.extend(Connection(socket_path).pop_messages())
messages.append(('vvvv', sys.stdout.getvalue()))
result.update({
'messages': messages,
'socket_path': socket_path
})
sys.stdout = saved_stdout
if 'exception' in result:
rc = 1
sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
else:
rc = 0
sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
sys.exit(rc)
if __name__ == '__main__':
main()
| 13,135 | Python | .py | 279 | 35.258065 | 155 | 0.602595 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,327 | __init__.py | ansible_ansible/lib/ansible/errors/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import re
import traceback
from collections.abc import Sequence
from ansible.errors.yaml_strings import (
YAML_COMMON_DICT_ERROR,
YAML_COMMON_LEADING_TAB_ERROR,
YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
YAML_COMMON_UNBALANCED_QUOTES_ERROR,
YAML_COMMON_UNQUOTED_COLON_ERROR,
YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
YAML_POSITION_DETAILS,
YAML_AND_SHORTHAND_ERROR,
)
from ansible.module_utils.common.text.converters import to_native, to_text
class AnsibleError(Exception):
"""
This is the base class for all errors raised from Ansible code,
and can be instantiated with two optional parameters beyond the
error message to control whether detailed information is displayed
when the error occurred while parsing a data file of some kind.
Usage:
raise AnsibleError('some message here', obj=obj, show_content=True)
Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
which should be returned by the DataLoader() class.
"""
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
super(AnsibleError, self).__init__(message)
self._show_content = show_content
self._suppress_extended_error = suppress_extended_error
self._message = to_native(message)
self.obj = obj
self.orig_exc = orig_exc
@property
def message(self):
# we import this here to prevent an import loop problem,
# since the objects code also imports ansible.errors
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
message = [self._message]
# Add from previous exceptions
if self.orig_exc:
message.append('. %s' % to_native(self.orig_exc))
# Add from yaml to give specific file/line no
if isinstance(self.obj, AnsibleBaseYAMLObject):
extended_error = self._get_extended_error()
if extended_error and not self._suppress_extended_error:
message.append(
'\n\n%s' % to_native(extended_error)
)
return ''.join(message)
@message.setter
def message(self, val):
self._message = val
def __str__(self):
return self.message
def __repr__(self):
return self.message
def _get_error_lines_from_file(self, file_name, line_number):
"""
Returns the line in the file which corresponds to the reported error
location, as well as the line preceding it (if the error did not
occur on the first line), to provide context to the error.
"""
target_line = ''
prev_line = ''
with open(file_name, 'r') as f:
lines = f.readlines()
# In case of a YAML loading error, PyYAML will report the very last line
# as the location of the error. Avoid an index error here in order to
# return a helpful message.
file_length = len(lines)
if line_number >= file_length:
line_number = file_length - 1
# If target_line contains only whitespace, move backwards until
# actual code is found. If there are several empty lines after target_line,
# the error lines would just be blank, which is not very helpful.
target_line = lines[line_number]
while not target_line.strip():
line_number -= 1
target_line = lines[line_number]
if line_number > 0:
prev_line = lines[line_number - 1]
return (target_line, prev_line)
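    # Illustrative sketch (added; hypothetical file and contents): if line 3 of
    # 'play.yml' is blank and the parser reported the error there, the walk-back
    # above returns the nearest real content, roughly:
    #   self._get_error_lines_from_file('play.yml', 2)
    #   # -> ('    copy: src=a dest=b\n', '  - name: example\n')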
def _get_extended_error(self):
"""
Given an object reporting the location of the exception in a file, return
detailed information regarding it including:
* the line which caused the error as well as the one preceding it
* causes and suggested remedies for common syntax errors
If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (i.e. vault data).
"""
error_message = ''
try:
(src_file, line_number, col_number) = self.obj.ansible_pos
error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
if src_file not in ('<string>', '<unicode>') and self._show_content:
(target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
target_line = to_text(target_line)
prev_line = to_text(prev_line)
if target_line:
stripped_line = target_line.replace(" ", "")
                    # Check for k=v syntax in addition to YAML syntax and set the
                    # appropriate error position and arrow index
if re.search(r'\w+(\s+)?=(\s+)?[\w/-]+', prev_line):
error_position = prev_line.rstrip().find('=')
arrow_line = (" " * error_position) + "^ here"
error_message = YAML_POSITION_DETAILS % (src_file, line_number - 1, error_position + 1)
error_message += "\nThe offending line appears to be:\n\n%s\n%s\n\n" % (prev_line.rstrip(), arrow_line)
error_message += YAML_AND_SHORTHAND_ERROR
else:
arrow_line = (" " * (col_number - 1)) + "^ here"
error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
# TODO: There may be cases where there is a valid tab in a line that has other errors.
if '\t' in target_line:
error_message += YAML_COMMON_LEADING_TAB_ERROR
# common error/remediation checking here:
# check for unquoted vars starting lines
if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
# check for common dictionary mistakes
elif ":{{" in stripped_line and "}}" in stripped_line:
error_message += YAML_COMMON_DICT_ERROR
# check for common unquoted colon mistakes
elif (len(target_line) and
len(target_line) > 1 and
len(target_line) > col_number and
target_line[col_number] == ":" and
target_line.count(':') > 1):
error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
# otherwise, check for some common quoting mistakes
else:
# FIXME: This needs to split on the first ':' to account for modules like lineinfile
# that may have lines that contain legitimate colons, e.g., line: 'i ALL= (ALL) NOPASSWD: ALL'
# and throw off the quote matching logic.
parts = target_line.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
                                if (len(middle) > 0 and
                                        middle[0] in ['"', "'"] and
                                        middle[-1] in ['"', "'"] and
                                        (target_line.count("'") > 2 or
                                         target_line.count('"') > 2)):
                                    unbalanced = True
if match:
error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
if unbalanced:
error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
except (IOError, TypeError):
error_message += '\n(could not open file to display line)'
except IndexError:
error_message += '\n(specified line no longer in file, maybe it changed?)'
return error_message
class AnsiblePromptInterrupt(AnsibleError):
"""User interrupt"""
class AnsiblePromptNoninteractive(AnsibleError):
"""Unable to get user input"""
class AnsibleAssertionError(AnsibleError, AssertionError):
"""Invalid assertion"""
pass
class AnsibleOptionsError(AnsibleError):
""" bad or incomplete options passed """
pass
class AnsibleRequiredOptionError(AnsibleOptionsError):
""" bad or incomplete options passed """
pass
class AnsibleParserError(AnsibleError):
""" something was detected early that is wrong about a playbook or data file """
pass
class AnsibleInternalError(AnsibleError):
""" internal safeguards tripped, something happened in the code that should never happen """
pass
class AnsibleRuntimeError(AnsibleError):
""" ansible had a problem while running a playbook """
pass
class AnsibleModuleError(AnsibleRuntimeError):
""" a module failed somehow """
pass
class AnsibleConnectionFailure(AnsibleRuntimeError):
""" the transport / connection_plugin had a fatal error """
pass
class AnsibleAuthenticationFailure(AnsibleConnectionFailure):
"""invalid username/password/key"""
pass
class AnsibleCallbackError(AnsibleRuntimeError):
""" a callback failure """
pass
class AnsibleTemplateError(AnsibleRuntimeError):
"""A template related error"""
pass
class AnsibleFilterError(AnsibleTemplateError):
""" a templating failure """
pass
class AnsibleLookupError(AnsibleTemplateError):
""" a lookup failure """
pass
class AnsibleUndefinedVariable(AnsibleTemplateError):
""" a templating failure """
pass
class AnsibleFileNotFound(AnsibleRuntimeError):
""" a file missing failure """
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None):
self.file_name = file_name
self.paths = paths
if message:
message += "\n"
if self.file_name:
message += "Could not find or access '%s'" % to_text(self.file_name)
else:
message += "Could not find file"
if self.paths and isinstance(self.paths, Sequence):
searched = to_text('\n\t'.join(self.paths))
if message:
message += "\n"
message += "Searched in:\n\t%s" % searched
message += " on the Ansible Controller.\nIf you are using a module and expect the file to exist on the remote, see the remote_src option"
super(AnsibleFileNotFound, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
# These Exceptions are temporary, using them as flow control until we can get a better solution.
# DO NOT USE as they will probably be removed soon.
# We will port the action modules in our tree to use a context manager instead.
class AnsibleAction(AnsibleRuntimeError):
""" Base Exception for Action plugin flow control """
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
super(AnsibleAction, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
if result is None:
self.result = {}
else:
self.result = result
class AnsibleActionSkip(AnsibleAction):
""" an action runtime skip"""
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
self.result.update({'skipped': True, 'msg': message})
class AnsibleActionFail(AnsibleAction):
""" an action runtime failure"""
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
self.result.update({'failed': True, 'msg': message, 'exception': traceback.format_exc()})
class _AnsibleActionDone(AnsibleAction):
""" an action runtime early exit"""
pass
class AnsiblePluginError(AnsibleError):
""" base class for Ansible plugin-related errors that do not need AnsibleError contextual data """
def __init__(self, message=None, plugin_load_context=None):
super(AnsiblePluginError, self).__init__(message)
self.plugin_load_context = plugin_load_context
class AnsiblePluginRemovedError(AnsiblePluginError):
""" a requested plugin has been removed """
pass
class AnsiblePluginCircularRedirect(AnsiblePluginError):
"""a cycle was detected in plugin redirection"""
pass
class AnsibleCollectionUnsupportedVersionError(AnsiblePluginError):
"""a collection is not supported by this version of Ansible"""
pass
class AnsibleFilterTypeError(AnsibleTemplateError, TypeError):
""" a Jinja filter templating failure due to bad type"""
pass
class AnsiblePluginNotFound(AnsiblePluginError):
""" Indicates we did not find an Ansible plugin """
pass
| 14,943
|
Python
|
.py
| 287
| 40.797909
| 151
| 0.621779
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,328
|
yaml_strings.py
|
ansible_ansible/lib/ansible/errors/yaml_strings.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
__all__ = [
'YAML_SYNTAX_ERROR',
'YAML_POSITION_DETAILS',
'YAML_COMMON_DICT_ERROR',
'YAML_COMMON_UNQUOTED_VARIABLE_ERROR',
'YAML_COMMON_UNQUOTED_COLON_ERROR',
'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR',
    'YAML_COMMON_UNBALANCED_QUOTES_ERROR',
    'YAML_COMMON_LEADING_TAB_ERROR',
    'YAML_AND_SHORTHAND_ERROR',
]
YAML_SYNTAX_ERROR = """\
Syntax Error while loading YAML.
%s"""
YAML_POSITION_DETAILS = """\
The error appears to be in '%s': line %s, column %s, but may
be elsewhere in the file depending on the exact syntax problem.
"""
YAML_COMMON_DICT_ERROR = """\
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
YAML_COMMON_UNQUOTED_COLON_ERROR = """\
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is to add some quotes around the colon, or to quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\
This one looks easy to fix. It seems that there is a value that starts
with a quote, and the YAML parser is expecting to see the line end
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
Or equivalently:
when: "'ok' in result.stdout"
"""
YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
YAML_COMMON_LEADING_TAB_ERROR = """\
There appears to be a tab character at the start of the line.
YAML does not use tabs for formatting. Tabs should be replaced with spaces.
For example:
- name: update tooling
vars:
version: 1.2.3
# ^--- there is a tab there.
Should be written as:
- name: update tooling
vars:
version: 1.2.3
# ^--- all spaces here.
"""
YAML_AND_SHORTHAND_ERROR = """\
There appears to be both 'k=v' shorthand syntax and YAML in this task. \
Only one syntax may be used.
"""
| 3,858
|
Python
|
.py
| 103
| 34.912621
| 81
| 0.733333
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,329
|
importlib_resources.py
|
ansible_ansible/lib/ansible/compat/importlib_resources.py
|
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import sys
HAS_IMPORTLIB_RESOURCES = False
if sys.version_info < (3, 10):
try:
from importlib_resources import files # type: ignore[import] # pylint: disable=unused-import
except ImportError:
files = None # type: ignore[assignment]
else:
HAS_IMPORTLIB_RESOURCES = True
else:
from importlib.resources import files
HAS_IMPORTLIB_RESOURCES = True
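# A minimal usage sketch (illustrative; not part of the original file). Callers
# can rely on this shim regardless of the running Python version, assuming the
# importlib_resources backport is installed on Python < 3.10:
#
#   from ansible.compat.importlib_resources import HAS_IMPORTLIB_RESOURCES, files
#   if HAS_IMPORTLIB_RESOURCES:
#       text = files('ansible.config').joinpath('base.yml').read_text()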
| 572
|
Python
|
.py
| 15
| 33.933333
| 102
| 0.728752
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,330
|
__init__.py
|
ansible_ansible/lib/ansible/compat/__init__.py
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Compat library for ansible. This contains compatibility definitions for older Python versions.
When we need to import a module differently depending on python version, do it
here. Then in the code we can simply import from compat in order to get what we want.
"""
from __future__ import annotations
| 1,002
|
Python
|
.py
| 22
| 44.5
| 86
| 0.78141
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,331
|
task_queue_manager.py
|
ansible_ansible/lib/ansible/executor/task_queue_manager.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import sys
import tempfile
import threading
import time
import typing as t
import multiprocessing.queues
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.stats import AggregateStats
from ansible.executor.task_result import TaskResult
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.vars.reserved import warn_if_reserved
from ansible.utils.display import Display
from ansible.utils.lock import lock_decorator
from ansible.utils.multiprocessing import context as multiprocessing_context
from dataclasses import dataclass
__all__ = ['TaskQueueManager']
display = Display()
class CallbackSend:
def __init__(self, method_name, *args, **kwargs):
self.method_name = method_name
self.args = args
self.kwargs = kwargs
class DisplaySend:
def __init__(self, method, *args, **kwargs):
self.method = method
self.args = args
self.kwargs = kwargs
@dataclass
class PromptSend:
worker_id: int
prompt: str
private: bool = True
    seconds: t.Optional[int] = None
    interrupt_input: t.Optional[t.Iterable[bytes]] = None
    complete_input: t.Optional[t.Iterable[bytes]] = None
class FinalQueue(multiprocessing.queues.SimpleQueue):
def __init__(self, *args, **kwargs):
kwargs['ctx'] = multiprocessing_context
super().__init__(*args, **kwargs)
def send_callback(self, method_name, *args, **kwargs):
self.put(
CallbackSend(method_name, *args, **kwargs),
)
def send_task_result(self, *args, **kwargs):
if isinstance(args[0], TaskResult):
tr = args[0]
else:
tr = TaskResult(*args, **kwargs)
self.put(
tr,
)
def send_display(self, method, *args, **kwargs):
self.put(
DisplaySend(method, *args, **kwargs),
)
def send_prompt(self, **kwargs):
self.put(
PromptSend(**kwargs),
)
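# Illustrative sketch (added; variable names are assumptions): worker processes
# hold a reference to a FinalQueue and funnel everything back to the main
# process through the typed helpers above, e.g.:
#   final_q.send_task_result(host.name, task._uuid, result, task_fields=task.dump_attrs())
#   final_q.send_display('warning', 'something noteworthy happened')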
class AnsibleEndPlay(Exception):
def __init__(self, result):
self.result = result
class TaskQueueManager:
"""
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared datastructures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
"""
RUN_OK = 0
RUN_ERROR = 1
RUN_FAILED_HOSTS = 2
RUN_UNREACHABLE_HOSTS = 4
RUN_FAILED_BREAK_PLAY = 8
RUN_UNKNOWN_ERROR = 255
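    # Note (added for clarity): the failure codes above are powers of two, so a
    # single run result can carry several conditions at once via bitwise OR,
    # e.g. RUN_FAILED_HOSTS | RUN_UNREACHABLE_HOSTS == 6.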
def __init__(self, inventory, variable_manager, loader, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False, forks=None):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._forks = forks or 5
self._callbacks_loaded = False
self._callback_plugins = []
self._start_at_done = False
# make sure any module paths (if specified) are added to the module_loader
if context.CLIARGS.get('module_path', False):
for path in context.CLIARGS['module_path']:
if path:
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
try:
self._final_q = FinalQueue()
except OSError as e:
raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
self._callback_lock = threading.Lock()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
self._workers = []
for i in range(num):
self._workers.append(None)
def load_callbacks(self):
"""
        Loads all available callbacks, honoring the CALLBACK_TYPE option:
        when CALLBACK_TYPE is set to 'stdout', only one such callback
        plugin will be loaded.
"""
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
self._stdout_callback.set_options()
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
# get all configured loadable callbacks (adjacent, builtin)
callback_list = list(callback_loader.all(class_only=True))
# add enabled callbacks that refer to collections, which might not appear in normal listing
for c in C.CALLBACKS_ENABLED:
# load all, as collection ones might be using short/redirected names and not a fqcn
plugin = callback_loader.get(c, class_only=True)
# TODO: check if this skip is redundant, loader should handle bad file/plugin cases already
if plugin:
# avoids incorrect and dupes possible due to collections
if plugin not in callback_list:
callback_list.append(plugin)
else:
display.warning("Skipping callback plugin '%s', unable to load" % c)
# for each callback in the list see if we should add it to 'active callbacks' used in the play
for callback_plugin in callback_list:
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
# try to get collection world name first
cnames = getattr(callback_plugin, '_redirected_names', [])
if cnames:
# store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
callback_name = cnames[0]
else:
# fallback to 'old loader name'
(callback_name, ext) = os.path.splitext(os.path.basename(callback_plugin._original_path))
display.vvvvv("Attempting to use '%s' callback." % (callback_name))
if callback_type == 'stdout':
# we only allow one callback of type 'stdout' to be loaded,
if callback_name != self._stdout_callback or stdout_callback_loaded:
display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
# TODO: remove special case for tree, which is an adhoc cli option --tree
pass
elif not self._run_additional_callbacks or (callback_needs_enabled and (
# only run if not adhoc, or adhoc was specifically configured to run + check enabled list
C.CALLBACKS_ENABLED is None or callback_name not in C.CALLBACKS_ENABLED)):
                # 2.x plugins shipped with ansible should require enabling; older or non-shipped plugins should load automatically
continue
try:
callback_obj = callback_plugin()
# avoid bad plugin not returning an object, only needed cause we do class_only load and bypass loader checks,
# really a bug in the plugin itself which we ignore as callback errors are not supposed to be fatal.
if callback_obj:
# skip initializing if we already did the work for the same plugin (even with diff names)
if callback_obj not in self._callback_plugins:
callback_obj.set_options()
self._callback_plugins.append(callback_obj)
else:
display.vv("Skipping callback '%s', already loaded as '%s'." % (callback_plugin, callback_name))
else:
display.warning("Skipping callback '%s', as it does not create a valid plugin instance." % callback_name)
continue
except Exception as e:
display.warning("Skipping callback '%s', unable to load due to: %s" % (callback_name, to_native(e)))
continue
self._callbacks_loaded = True
def run(self, play):
"""
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
"""
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
warn_if_reserved(all_vars, templar.environment.globals.keys())
new_play = play.copy()
new_play.post_validate(templar)
new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
play_context = PlayContext(new_play, self.passwords, self._connection_lockfile.fileno())
if (self._stdout_callback and
hasattr(self._stdout_callback, 'set_play_context')):
self._stdout_callback.set_play_context(play_context)
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done=self._start_at_done,
)
        # adjust the number of workers to the configured forks or the size of the batch, whichever is lower
self._initialize_processes(min(self._forks, iterator.batch_size))
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# Because the TQM may survive multiple play runs, we start by marking
# any hosts as failed in the iterator here which may have been marked
# as failed in previous runs. Then we clear the internal list of failed
# hosts so we know what failed this round.
for host_name in self._failed_hosts.keys():
host = self._inventory.get_host(host_name)
iterator.mark_host_failed(host)
for host_name in self._unreachable_hosts.keys():
iterator._play._removed_hosts.append(host_name)
self.clear_failed_hosts()
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if context.CLIARGS.get('start_at_task') is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
try:
play_return = strategy.run(iterator, play_context)
finally:
strategy.cleanup()
self._cleanup_processes()
# now re-save the hosts that failed from the iterator to our internal list
for host_name in iterator.get_failed_hosts():
self._failed_hosts[host_name] = True
if iterator.end_play:
raise AnsibleEndPlay(play_return)
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
# We no longer flush on every write in ``Display.display``
# just ensure we've flushed during cleanup
sys.stdout.flush()
sys.stderr.flush()
def _cleanup_processes(self):
if hasattr(self, '_workers'):
for attempts_remaining in range(C.WORKER_SHUTDOWN_POLL_COUNT - 1, -1, -1):
if not any(worker_prc and worker_prc.is_alive() for worker_prc in self._workers):
break
if attempts_remaining:
time.sleep(C.WORKER_SHUTDOWN_POLL_DELAY)
else:
display.warning('One or more worker processes are still running and will be terminated.')
for worker_prc in self._workers:
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def has_dead_workers(self):
# [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
# <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>
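        # Note (added): a clean exit (exitcode 0) and a still-running worker
        # (exitcode None) are both falsy here, so only workers that died with a
        # nonzero status or from a signal are reported as defunct.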
defunct = False
for x in self._workers:
if getattr(x, 'exitcode', None):
defunct = True
return defunct
@lock_decorator(attr='_callback_lock')
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# a plugin can opt in to implicit tasks (such as meta). It does this
# by declaring self.wants_implicit_tasks = True.
wants_implicit_tasks = getattr(callback_plugin, 'wants_implicit_tasks', False)
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possible, None)
if gotit is None:
gotit = getattr(callback_plugin, possible.removeprefix('v2_'), None)
if gotit is not None:
methods.append(gotit)
# send clean copies
new_args = []
# If we end up being given an implicit task, we'll set this flag in
# the loop below. If the plugin doesn't care about those, then we
# check and continue to the next iteration of the outer loop.
is_implicit_task = False
for arg in args:
# FIXME: add play/task cleaners
if isinstance(arg, TaskResult):
new_args.append(arg.clean_copy())
# elif isinstance(arg, Play):
# elif isinstance(arg, Task):
else:
new_args.append(arg)
if isinstance(arg, Task) and arg.implicit:
is_implicit_task = True
if is_implicit_task and not wants_implicit_tasks:
continue
for method in methods:
try:
method(*new_args, **kwargs)
except Exception as e:
# TODO: add config toggle to make this fatal or not?
display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
from traceback import format_tb
from sys import exc_info
display.vvv('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
| 18,640
|
Python
|
.py
| 380
| 38.544737
| 156
| 0.634417
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,332
|
task_executor.py
|
ansible_ansible/lib/ansible/executor/task_executor.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import time
import json
import pathlib
import signal
import subprocess
import sys
import traceback
from ansible import constants as C
from ansible.cli import scripts
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip
from ansible.executor.task_result import TaskResult
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import binary_type
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.connection import write_to_stream
from ansible.module_utils.six import string_types
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import get_plugin_class
from ansible.plugins.loader import become_loader, cliconf_loader, connection_loader, httpapi_loader, netconf_loader, terminal_loader
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var
from ansible.vars.clean import namespace_facts, clean_facts
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
RETURN_VARS = [x for x in C.MAGIC_VARIABLE_MAPPING.items() if 'become' not in x and '_pass' not in x]
__all__ = ['TaskExecutor']
class TaskTimeoutError(BaseException):
def __init__(self, message="", frame=None):
if frame is not None:
orig = frame
root = pathlib.Path(__file__).parent
while not pathlib.Path(frame.f_code.co_filename).is_relative_to(root):
frame = frame.f_back
self.frame = 'Interrupted at %s called from %s' % (orig, frame)
super(TaskTimeoutError, self).__init__(message)
def task_timeout(signum, frame):
raise TaskTimeoutError(frame=frame)
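# Illustrative sketch (added; mirrors how _execute() arms this handler below):
#   old_sig = signal.signal(signal.SIGALRM, task_timeout)
#   signal.alarm(task.timeout)   # SIGALRM fires after `timeout` seconds
#   try:
#       result = handler.run(task_vars=vars_copy)
#   finally:
#       signal.alarm(0)          # disarm
#       signal.signal(signal.SIGALRM, old_sig)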
def remove_omit(task_args, omit_token):
"""
    Recursively remove args whose value equals the ``omit_token``, including
    within nested dicts and lists, to align with argument specs that now
    support suboptions
"""
if not isinstance(task_args, dict):
return task_args
new_args = {}
for i in task_args.items():
if i[1] == omit_token:
continue
elif isinstance(i[1], dict):
new_args[i[0]] = remove_omit(i[1], omit_token)
elif isinstance(i[1], list):
new_args[i[0]] = [remove_omit(v, omit_token) for v in i[1]]
else:
new_args[i[0]] = i[1]
return new_args
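# Illustrative example (added; hypothetical token value):
#   remove_omit({'src': '__omit__', 'dest': '/tmp', 'opts': {'mode': '__omit__'}}, '__omit__')
#   # -> {'dest': '/tmp', 'opts': {}}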
class TaskExecutor:
"""
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
"""
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, final_q, variable_manager):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
self._connection = None
self._final_q = final_q
self._variable_manager = variable_manager
self._loop_eval_error = None
self._task.squash()
def run(self):
"""
The main executor entrypoint, where we determine if the specified
task requires looping and either runs the task with self._run_loop()
or self._execute(). After that, the returned results are parsed and
returned as a dict.
"""
display.debug("in run() - task %s" % self._task._uuid)
try:
try:
items = self._get_loop_items()
except AnsibleUndefinedVariable as e:
# save the error raised here for use later
items = None
self._loop_eval_error = e
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
# create the overall result item
res = dict(results=item_results)
# loop through the item results and set the global changed/failed/skipped result flags based on any item.
res['skipped'] = True
for item in item_results:
if 'changed' in item and item['changed'] and not res.get('changed'):
res['changed'] = True
if res['skipped'] and ('skipped' not in item or ('skipped' in item and not item['skipped'])):
res['skipped'] = False
if 'failed' in item and item['failed']:
item_ignore = item.pop('_ansible_ignore_errors')
if not res.get('failed'):
res['failed'] = True
res['msg'] = 'One or more items failed'
self._task.ignore_errors = item_ignore
elif self._task.ignore_errors and not item_ignore:
self._task.ignore_errors = item_ignore
if 'unreachable' in item and item['unreachable']:
item_ignore_unreachable = item.pop('_ansible_ignore_unreachable')
if not res.get('unreachable'):
self._task.ignore_unreachable = item_ignore_unreachable
elif self._task.ignore_unreachable and not item_ignore_unreachable:
self._task.ignore_unreachable = item_ignore_unreachable
# ensure to accumulate these
for array in ['warnings', 'deprecations']:
if array in item and item[array]:
if array not in res:
res[array] = []
if not isinstance(item[array], list):
item[array] = [item[array]]
res[array] = res[array] + item[array]
del item[array]
if not res.get('failed', False):
res['msg'] = 'All items completed'
if res['skipped']:
res['msg'] = 'All items skipped'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
display.debug("calling self._execute()")
res = self._execute()
display.debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
def _clean_res(res, errors='surrogate_or_strict'):
if isinstance(res, binary_type):
return to_unsafe_text(res, errors=errors)
elif isinstance(res, dict):
for k in res:
try:
res[k] = _clean_res(res[k], errors=errors)
except UnicodeError:
if k == 'diff':
# If this is a diff, substitute a replacement character if the value
# is undecodable as utf8. (Fix #21804)
display.warning("We were unable to decode all characters in the module return data."
" Replaced some in an effort to return as much as possible")
res[k] = _clean_res(res[k], errors='surrogate_then_replace')
else:
raise
elif isinstance(res, list):
for idx, item in enumerate(res):
res[idx] = _clean_res(item, errors=errors)
return res
display.debug("dumping result to json")
res = _clean_res(res)
display.debug("done dumping result, returning")
return res
except AnsibleError as e:
return dict(failed=True, msg=wrap_var(to_text(e, nonstring='simplerepr')), _ansible_no_log=self._play_context.no_log)
except Exception as e:
return dict(failed=True, msg=wrap_var('Unexpected failure during module execution: %s' % (to_native(e, nonstring='simplerepr'))),
exception=to_text(traceback.format_exc()), stdout='', _ansible_no_log=self._play_context.no_log)
finally:
try:
self._connection.close()
except AttributeError:
pass
except Exception as e:
display.debug(u"error closing connection: %s" % to_text(e))
def _get_loop_items(self):
"""
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
"""
# get search path for this task to pass to lookup plugins
self._job_vars['ansible_search_path'] = self._task.get_search_path()
# ensure basedir is always in (dwim already searches here but we need to display it)
if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
self._job_vars['ansible_search_path'].append(self._loader.get_basedir())
templar = Templar(loader=self._loader, variables=self._job_vars)
items = None
if self._task.loop_with:
if self._task.loop_with in self._shared_loader_obj.lookup_loader:
                # TODO: this is hardcoded so it only fails for non-first_found lookups; it should be
                # generalized (via a lookup prop/attribute?) for lookups that don't do their own templating
fail = bool(self._task.loop_with != 'first_found')
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, fail_on_undefined=fail, convert_bare=False)
# get lookup
mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop_with, loader=self._loader, templar=templar)
# give lookup task 'context' for subdir (mostly needed for first_found)
for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
if subdir in self._task.action:
break
setattr(mylookup, '_subdir', subdir + 's')
# run lookup
items = wrap_var(mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True))
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop_with)
elif self._task.loop is not None:
items = templar.template(self._task.loop)
if not isinstance(items, list):
raise AnsibleError(
"Invalid data passed to 'loop', it requires a list, got this instead: %s."
" Hint: If you passed a list/dict of just one element,"
" try adding wantlist=True to your lookup invocation or use q/query instead of lookup." % items
)
return items
def _run_loop(self, items):
"""
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
"""
task_vars = self._job_vars
templar = Templar(loader=self._loader, variables=task_vars)
self._task.loop_control.post_validate(templar=templar)
loop_var = self._task.loop_control.loop_var
index_var = self._task.loop_control.index_var
loop_pause = self._task.loop_control.pause
extended = self._task.loop_control.extended
extended_allitems = self._task.loop_control.extended_allitems
# ensure we always have a label
label = self._task.loop_control.label or '{{' + loop_var + '}}'
if loop_var in task_vars:
display.warning(u"%s: The loop variable '%s' is already in use. "
u"You should set the `loop_var` value in the `loop_control` option for the task"
u" to something else to avoid variable collisions and unexpected behavior." % (self._task, loop_var))
ran_once = False
task_fields = None
no_log = False
items_len = len(items)
results = []
for item_index, item in enumerate(items):
task_vars['ansible_loop_var'] = loop_var
task_vars[loop_var] = item
if index_var:
task_vars['ansible_index_var'] = index_var
task_vars[index_var] = item_index
if extended:
task_vars['ansible_loop'] = {
'index': item_index + 1,
'index0': item_index,
'first': item_index == 0,
'last': item_index + 1 == items_len,
'length': items_len,
'revindex': items_len - item_index,
'revindex0': items_len - item_index - 1,
}
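                # Illustrative values (added): for items=['a', 'b', 'c'] on the
                # second iteration (item_index == 1), ansible_loop is:
                #   {'index': 2, 'index0': 1, 'first': False, 'last': False,
                #    'length': 3, 'revindex': 2, 'revindex0': 1}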
if extended_allitems:
task_vars['ansible_loop']['allitems'] = items
try:
task_vars['ansible_loop']['nextitem'] = items[item_index + 1]
except IndexError:
pass
if item_index - 1 >= 0:
task_vars['ansible_loop']['previtem'] = items[item_index - 1]
# Update template vars to reflect current loop iteration
templar.available_variables = task_vars
# pause between loop iterations
if loop_pause and ran_once:
time.sleep(loop_pause)
else:
ran_once = True
try:
tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
tmp_task._parent = self._task._parent
tmp_play_context = self._play_context.copy()
except AnsibleParserError as e:
results.append(dict(failed=True, msg=to_text(e)))
continue
# now we swap the internal task and play context with their copies,
# execute, and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
res = self._execute(variables=task_vars)
if self._task.register:
# Ensure per loop iteration results are registered in case `_execute()`
# returns early (when conditional, failure, ...).
# This is needed in case the registered variable is used in the loop label template.
task_vars[self._task.register] = res
task_fields = self._task.dump_attrs()
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
# update 'general no_log' based on specific no_log
no_log = no_log or tmp_task.no_log
# now update the result with the item info, and append the result
# to the list of results
res[loop_var] = item
res['ansible_loop_var'] = loop_var
if index_var:
res[index_var] = item_index
res['ansible_index_var'] = index_var
if extended:
res['ansible_loop'] = task_vars['ansible_loop']
res['_ansible_item_result'] = True
res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')
res['_ansible_ignore_unreachable'] = task_fields.get('ignore_unreachable')
# gets templated here unlike rest of loop_control fields, depends on loop_var above
try:
res['_ansible_item_label'] = templar.template(label)
except AnsibleUndefinedVariable as e:
res.update({
'failed': True,
'msg': 'Failed to template loop_control.label: %s' % to_text(e)
})
# if plugin is loaded, get resolved name, otherwise leave original task connection
if self._connection and not isinstance(self._connection, string_types):
task_fields['connection'] = getattr(self._connection, 'ansible_name')
tr = TaskResult(
self._host.name,
self._task._uuid,
res,
task_fields=task_fields,
)
if tr.is_failed() or tr.is_unreachable():
self._final_q.send_callback('v2_runner_item_on_failed', tr)
elif tr.is_skipped():
self._final_q.send_callback('v2_runner_item_on_skipped', tr)
else:
if getattr(self._task, 'diff', False):
self._final_q.send_callback('v2_on_file_diff', tr)
if self._task.action not in C._ACTION_INVENTORY_TASKS:
self._final_q.send_callback('v2_runner_item_on_ok', tr)
results.append(res)
# break loop if break_when conditions are met
if self._task.loop_control and self._task.loop_control.break_when:
cond = Conditional(loader=self._loader)
cond.when = self._task.loop_control.get_validated_value(
'break_when', self._task.loop_control.fattributes.get('break_when'), self._task.loop_control.break_when, templar
)
if cond.evaluate_conditional(templar, task_vars):
# delete loop vars before exiting loop
del task_vars[loop_var]
break
# done with loop var, remove for next iteration
del task_vars[loop_var]
# clear 'connection related' plugin variables for next iteration
if self._connection:
clear_plugins = {
'connection': self._connection._load_name,
'shell': self._connection._shell._load_name
}
if self._connection.become:
clear_plugins['become'] = self._connection.become._load_name
for plugin_type, plugin_name in clear_plugins.items():
for var in C.config.get_plugin_vars(plugin_type, plugin_name):
if var in task_vars and var not in self._job_vars:
del task_vars[var]
self._task.no_log = no_log
        # NOTE: run_once cannot contain loop vars because it is also templated earlier,
        # before the loop runs
# This is saving the post-validated field from the last loop so the strategy can use the templated value post task execution
self._task.run_once = task_fields.get('run_once')
self._task.action = task_fields.get('action')
return results
def _calculate_delegate_to(self, templar, variables):
"""This method is responsible for effectively pre-validating Task.delegate_to and will
happen before Task.post_validate is executed
"""
delegated_vars, delegated_host_name = self._variable_manager.get_delegated_vars_and_hostname(templar, self._task, variables)
# At the point this is executed it is safe to mutate self._task,
# since `self._task` is either a copy referred to by `tmp_task` in `_run_loop`
# or just a singular non-looped task
if delegated_host_name:
self._task.delegate_to = delegated_host_name
variables.update(delegated_vars)
def _execute(self, variables=None):
"""
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
"""
if variables is None:
variables = self._job_vars
templar = Templar(loader=self._loader, variables=variables)
self._calculate_delegate_to(templar, variables)
context_validation_error = None
        # work on a temporary copy of the vars so the play context below can make
        # sure a certain subset of variables exist
tempvars = variables.copy()
try:
# TODO: remove play_context as this does not take delegation nor loops correctly into account,
# the task itself should hold the correct values for connection/shell/become/terminal plugin options to finalize.
# Kept for now for backwards compatibility and a few functions that are still exclusive to it.
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
self._play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not self._play_context.remote_addr:
self._play_context.remote_addr = self._host.address
# We also add "magic" variables back into the variables dict to make sure
self._play_context.update_vars(tempvars)
except AnsibleError as e:
# save the error, which we'll raise later if we don't end up
# skipping this task during the conditional evaluation step
context_validation_error = e
no_log = self._play_context.no_log
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
try:
conditional_result, false_condition = self._task.evaluate_conditional_with_result(templar, tempvars)
if not conditional_result:
display.debug("when evaluation is False, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional result was False',
false_condition=false_condition, _ansible_no_log=no_log)
except AnsibleError as e:
# loop error takes precedence
if self._loop_eval_error is not None:
# Display the error from the conditional as well to prevent
# losing information useful for debugging.
display.v(to_text(e))
raise self._loop_eval_error # pylint: disable=raising-bad-type
raise
# Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
if self._loop_eval_error is not None:
raise self._loop_eval_error # pylint: disable=raising-bad-type
# if we ran into an error while setting up the PlayContext, raise it now, unless is known issue with delegation
# and undefined vars (correct values are in cvars later on and connection plugins, if still error, blows up there)
if context_validation_error is not None:
raiseit = True
if self._task.delegate_to:
if isinstance(context_validation_error, AnsibleUndefinedVariable):
raiseit = False
elif isinstance(context_validation_error, AnsibleParserError):
# parser error, might be cause by undef too
orig_exc = getattr(context_validation_error, 'orig_exc', None)
if isinstance(orig_exc, AnsibleUndefinedVariable):
raiseit = False
if raiseit:
raise context_validation_error # pylint: disable=raising-bad-type
# set templar to use temp variables until loop is evaluated
templar.available_variables = tempvars
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action in C._ACTION_INCLUDE_TASKS:
include_args = self._task.args.copy()
include_file = include_args.pop('_raw_params', None)
if not include_file:
return dict(failed=True, msg="No include file was specified to the include")
include_file = templar.template(include_file)
return dict(include=include_file, include_args=include_args)
# if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
elif self._task.action in C._ACTION_INCLUDE_ROLE:
include_args = self._task.args.copy()
return dict(include_args=include_args)
# Now we do final validation on the task, which sets all fields to their final values.
try:
self._task.post_validate(templar=templar)
except AnsibleError:
raise
except Exception:
return dict(changed=False, failed=True, _ansible_no_log=no_log, exception=to_text(traceback.format_exc()))
if '_variable_params' in self._task.args:
variable_params = self._task.args.pop('_variable_params')
if isinstance(variable_params, dict):
if C.INJECT_FACTS_AS_VARS:
display.warning("Using a variable for a task's 'args' is unsafe in some situations "
"(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
variable_params.update(self._task.args)
self._task.args = variable_params
else:
# if we didn't get a dict, it means there's garbage remaining after k=v parsing, just give up
# see https://github.com/ansible/ansible/issues/79862
raise AnsibleError(f"invalid or malformed argument: '{variable_params}'")
# update no_log to task value, now that we have it templated
no_log = self._task.no_log
# free tempvars up, not used anymore, cvars and vars_copy should be mainly used after this point
# updating the original 'variables' at the end
tempvars = {}
# setup cvars copy, used for all connection related templating
if self._task.delegate_to:
# use vars from delegated host (which already include task vars) instead of original host
cvars = variables.get('ansible_delegated_vars', {}).get(self._task.delegate_to, {})
else:
# just use normal host vars
cvars = variables
templar.available_variables = cvars
        # use magic var if it exists, if not, let task inheritance do its thing.
if cvars.get('ansible_connection') is not None:
current_connection = templar.template(cvars['ansible_connection'])
else:
current_connection = self._task.connection
# get the connection and the handler for this execution
if (not self._connection or
not getattr(self._connection, 'connected', False) or
not self._connection.matches_name([current_connection]) or
# pc compare, left here for old plugins, but should be irrelevant for those
# using get_option, since they are cleared each iteration.
self._play_context.remote_addr != self._connection._play_context.remote_addr):
self._connection = self._get_connection(cvars, templar, current_connection)
else:
# if connection is reused, its _play_context is no longer valid and needs
# to be replaced with the one templated above, in case other data changed
self._connection._play_context = self._play_context
self._set_become_plugin(cvars, templar, self._connection)
plugin_vars = self._set_connection_options(cvars, templar)
# make a copy of the job vars here, as we update them here and later,
# but don't want to pollute original
vars_copy = variables.copy()
# update with connection info (i.e ansible_host/ansible_user)
self._connection.update_vars(vars_copy)
templar.available_variables = vars_copy
# TODO: eventually remove as pc is taken out of the resolution path
# feed back into pc to ensure plugins not using get_option can get correct value
self._connection._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=vars_copy, templar=templar)
# TODO: eventually remove this block as this should be a 'consequence' of 'forced_local' modules, right now rely on remote_is_local connection
# special handling for python interpreter for network_os, default to ansible python unless overridden
if 'ansible_python_interpreter' not in cvars and 'ansible_network_os' in cvars and getattr(self._connection, '_remote_is_local', False):
# this also avoids 'python discovery'
cvars['ansible_python_interpreter'] = sys.executable
# get handler
self._handler, module_context = self._get_action_handler_with_module_context(templar=templar)
if module_context is not None:
module_defaults_fqcn = module_context.resolved_fqcn
else:
module_defaults_fqcn = self._task.resolved_action
# Apply default params for action/module, if present
self._task.args = get_action_args_with_defaults(
module_defaults_fqcn, self._task.args, self._task.module_defaults, templar,
action_groups=self._task._parent._play._action_groups
)
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
if omit_token is not None:
self._task.args = remove_omit(self._task.args, omit_token)
retries = 1 # includes the default actual run + retries set by user/default
if self._task.retries is not None:
retries += max(0, self._task.retries)
elif self._task.until:
            retries += 3  # the default is not set in the field attributes because we need to differentiate an "unset" value
delay = self._task.delay
if delay < 0:
delay = 1
display.debug("starting attempt loop")
result = None
for attempt in range(1, retries + 1):
display.debug("running the handler")
try:
if self._task.timeout:
old_sig = signal.signal(signal.SIGALRM, task_timeout)
signal.alarm(self._task.timeout)
result = self._handler.run(task_vars=vars_copy)
except (AnsibleActionFail, AnsibleActionSkip) as e:
return e.result
except AnsibleConnectionFailure as e:
return dict(unreachable=True, msg=to_text(e))
except TaskTimeoutError as e:
msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
return dict(failed=True, msg=msg, timedout={'frame': e.frame, 'period': self._task.timeout})
finally:
if self._task.timeout:
signal.alarm(0)
old_sig = signal.signal(signal.SIGALRM, old_sig)
self._handler.cleanup()
display.debug("handler run complete")
            # propagate no_log to the result - the action can set this, so only overwrite it with the task's value if missing or falsy
result["_ansible_no_log"] = bool(no_log or result.get('_ansible_no_log', False))
if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
result = wrap_var(result)
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
vars_copy[self._task.register] = result
if self._task.async_val > 0:
if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
if result.get('failed'):
self._final_q.send_callback(
'v2_runner_on_async_failed',
TaskResult(self._host.name,
self._task._uuid,
result,
task_fields=self._task.dump_attrs()))
else:
self._final_q.send_callback(
'v2_runner_on_async_ok',
TaskResult(self._host.name,
self._task._uuid,
result,
task_fields=self._task.dump_attrs()))
# ensure no log is preserved
result["_ansible_no_log"] = no_log
# helper methods for use below in evaluating changed/failed_when
def _evaluate_changed_when_result(result):
if self._task.changed_when is not None and self._task.changed_when:
cond = Conditional(loader=self._loader)
cond.when = self._task.changed_when
result['changed'] = cond.evaluate_conditional(templar, vars_copy)
def _evaluate_failed_when_result(result):
if self._task.failed_when:
cond = Conditional(loader=self._loader)
cond.when = self._task.failed_when
failed_when_result = cond.evaluate_conditional(templar, vars_copy)
result['failed_when_result'] = result['failed'] = failed_when_result
else:
failed_when_result = False
return failed_when_result
if 'ansible_facts' in result and self._task.action not in C._ACTION_DEBUG:
if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
if self._task.delegate_to and self._task.delegate_facts:
if '_ansible_delegated_vars' in vars_copy:
vars_copy['_ansible_delegated_vars'].update(result['ansible_facts'])
else:
vars_copy['_ansible_delegated_vars'] = result['ansible_facts']
else:
vars_copy.update(result['ansible_facts'])
else:
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
af = wrap_var(result['ansible_facts'])
vars_copy['ansible_facts'] = combine_vars(vars_copy.get('ansible_facts', {}), namespace_facts(af))
if C.INJECT_FACTS_AS_VARS:
vars_copy.update(clean_facts(af))
# set the failed property if it was missing.
if 'failed' not in result:
# rc is here for backwards compatibility and modules that use it instead of 'failed'
if 'rc' in result and result['rc'] not in [0, "0"]:
result['failed'] = True
else:
result['failed'] = False
# Make attempts and retries available early to allow their use in changed/failed_when
if retries > 1:
result['attempts'] = attempt
# set the changed property if it was missing.
if 'changed' not in result:
result['changed'] = False
if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
result = wrap_var(result)
# re-update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
# This gives changed/failed_when access to additional recently modified
# attributes of result
if self._task.register:
vars_copy[self._task.register] = result
# if we didn't skip this task, use the helpers to evaluate the changed/
# failed_when properties
if 'skipped' not in result:
condname = 'changed'
try:
_evaluate_changed_when_result(result)
condname = 'failed'
_evaluate_failed_when_result(result)
except AnsibleError as e:
result['failed'] = True
result['%s_when_result' % condname] = to_text(e)
if retries > 1:
cond = Conditional(loader=self._loader)
cond.when = self._task.until or [not result['failed']]
if cond.evaluate_conditional(templar, vars_copy):
break
else:
# no conditional check, or it failed, so sleep for the specified time
if attempt < retries:
result['_ansible_retry'] = True
result['retries'] = retries
display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
self._final_q.send_callback(
'v2_runner_retry',
TaskResult(
self._host.name,
self._task._uuid,
result,
task_fields=self._task.dump_attrs()
)
)
time.sleep(delay)
self._handler = self._get_action_handler(templar=templar)
else:
if retries > 1:
# we ran out of attempts, so mark the result as failed
result['attempts'] = retries - 1
result['failed'] = True
if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
result = wrap_var(result)
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self._task.register:
variables[self._task.register] = result
if 'ansible_facts' in result and self._task.action not in C._ACTION_DEBUG:
if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
variables.update(result['ansible_facts'])
else:
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
af = wrap_var(result['ansible_facts'])
variables['ansible_facts'] = combine_vars(variables.get('ansible_facts', {}), namespace_facts(af))
if C.INJECT_FACTS_AS_VARS:
variables.update(clean_facts(af))
# save the notification target in the result, if it was specified, as
# this task may be running in a loop in which case the notification
# may be item-specific, i.e. "notify: service {{item}}"
if self._task.notify is not None:
result['_ansible_notify'] = self._task.notify
# add the delegated vars to the result, so we can reference them
# on the results side without having to do any further templating
# also now add connection vars results when delegating
if self._task.delegate_to:
result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
for k in plugin_vars:
result["_ansible_delegated_vars"][k] = cvars.get(k)
# note: here for callbacks that rely on this info to display delegation
for requireshed in ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection'):
if requireshed not in result["_ansible_delegated_vars"] and requireshed in cvars:
result["_ansible_delegated_vars"][requireshed] = cvars.get(requireshed)
# and return
display.debug("attempt loop complete, returning result")
return result
def _poll_async_result(self, result, templar, task_vars=None):
"""
Polls for the specified JID to be complete
"""
if task_vars is None:
task_vars = self._job_vars
async_jid = result.get('ansible_job_id')
if async_jid is None:
return dict(failed=True, msg="No job id was returned by the async task")
# Create a new pseudo-task to run the async_status module, and run
# that (with a sleep for "poll" seconds between each retry) until the
# async time limit is exceeded.
async_task = Task.load(dict(
action='async_status',
args={'jid': async_jid},
check_mode=self._task.check_mode,
environment=self._task.environment,
))
# FIXME: this is no longer the case; 'normal' takes care of everything, see if this can just be generalized
# Because this is an async task, the action handler is async. However,
# we need the 'normal' action handler for the status check, so get it
# now via the action_loader
async_handler = self._shared_loader_obj.action_loader.get(
'ansible.legacy.async_status',
task=async_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
time_left = self._task.async_val
while time_left > 0:
time.sleep(self._task.poll)
try:
async_result = async_handler.run(task_vars=task_vars)
# We do not bail out of the loop in cases where the failure
# is associated with a parsing error. The async_runner can
# have issues which result in a half-written/unparseable result
# file on disk, which manifests to the user as a timeout happening
# before the timeout has actually been reached.
if (int(async_result.get('finished', 0)) == 1 or
('failed' in async_result and async_result.get('_ansible_parsed', False)) or
'skipped' in async_result):
break
except Exception as e:
# Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
# On an exception, call the connection's reset method if it has one
# (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
try:
async_handler._connection.reset()
except AttributeError:
pass
# Little hack to raise the exception if we've exhausted the timeout period
time_left -= self._task.poll
if time_left <= 0:
raise
else:
time_left -= self._task.poll
self._final_q.send_callback(
'v2_runner_on_async_poll',
TaskResult(
self._host.name,
async_task._uuid,
async_result,
task_fields=async_task.dump_attrs(),
),
)
if int(async_result.get('finished', 0)) != 1:
if async_result.get('_ansible_parsed'):
return dict(failed=True, msg="async task did not complete within the requested time - %ss" % self._task.async_val, async_result=async_result)
else:
return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
else:
# If the async task finished, automatically cleanup the temporary
# status file left behind.
cleanup_task = Task.load(
{
'async_status': {
'jid': async_jid,
'mode': 'cleanup',
},
'check_mode': self._task.check_mode,
'environment': self._task.environment,
}
)
cleanup_handler = self._shared_loader_obj.action_loader.get(
'ansible.legacy.async_status',
task=cleanup_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
cleanup_handler.run(task_vars=task_vars)
cleanup_handler.cleanup(force=True)
async_handler.cleanup(force=True)
return async_result
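# --- Illustrative sketch, not part of the original source: the polling
# cadence of the loop above. `time_left` starts at async_val and drops by
# `poll` after each sleep, so with async_val=10 and poll=3 the status module
# runs at roughly t=3, 6, 9 and 12 seconds. The helper name is hypothetical.
def _sketch_poll_schedule(async_val, poll):
    checks = []
    time_left = async_val
    while time_left > 0:
        checks.append(async_val - time_left + poll)  # wall-clock moment of this check
        time_left -= poll
    return checks
# e.g. _sketch_poll_schedule(10, 3) -> [3, 6, 9, 12]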
def _get_become(self, name):
become = become_loader.get(name)
if not become:
raise AnsibleError("Invalid become method specified, could not find matching plugin: '%s'. "
"Use `ansible-doc -t become -l` to list available plugins." % name)
return become
def _get_connection(self, cvars, templar, current_connection):
"""
Reads the connection property for the host, and returns the
correct connection object from the list of connection plugins
"""
self._play_context.connection = current_connection
# TODO: play context has logic to update the connection for 'smart'
# (default value, will choose between ssh and paramiko) and 'persistent'
# (really paramiko), eventually this should move to task object itself.
conn_type = self._play_context.connection
connection, plugin_load_context = self._shared_loader_obj.connection_loader.get_with_context(
conn_type,
self._play_context,
self._new_stdin,
task_uuid=self._task._uuid,
ansible_playbook_pid=to_text(os.getppid())
)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
self._set_become_plugin(cvars, templar, connection)
# Also backwards compat call for those still using play_context
self._play_context.set_attributes_from_plugin(connection)
return connection
def _set_become_plugin(self, cvars, templar, connection):
# load become plugin if needed
if cvars.get('ansible_become') is not None:
become = boolean(templar.template(cvars['ansible_become']))
else:
become = self._task.become
if become:
if cvars.get('ansible_become_method'):
become_plugin = self._get_become(templar.template(cvars['ansible_become_method']))
else:
become_plugin = self._get_become(self._task.become_method)
else:
# If become is not enabled on the task it needs to be removed from the connection plugin
# https://github.com/ansible/ansible/issues/78425
become_plugin = None
try:
connection.set_become_plugin(become_plugin)
except AttributeError:
# Older connection plugin that does not support set_become_plugin
pass
if become_plugin:
if getattr(connection.become, 'require_tty', False) and not getattr(connection, 'has_tty', False):
raise AnsibleError(
"The '%s' connection does not provide a TTY which is required for the selected "
"become plugin: %s." % (connection._load_name, become_plugin.name)
)
# Backwards compat for connection plugins that don't support become plugins
# Just do this unconditionally for now, we could move it inside of the
# AttributeError above later
self._play_context.set_become_plugin(become_plugin.name)
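# --- Illustrative sketch, not part of the original source: the precedence
# applied above when resolving become, with templating elided. Host vars
# (ansible_become / ansible_become_method) win over the task attributes.
# The helper name is hypothetical.
def _sketch_resolve_become_method(cvars, task_become, task_become_method):
    become = cvars['ansible_become'] if cvars.get('ansible_become') is not None else task_become
    if not become:
        return None  # become disabled: the plugin must be unset on the connection
    return cvars.get('ansible_become_method') or task_become_method
# e.g. _sketch_resolve_become_method({'ansible_become': True}, False, 'sudo') -> 'sudo'
# e.g. _sketch_resolve_become_method({}, False, 'sudo') -> None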
def _set_plugin_options(self, plugin_type, variables, templar, task_keys):
try:
plugin = getattr(self._connection, '_%s' % plugin_type)
except AttributeError:
# Some plugins are assigned to private attrs, ``become`` is not
plugin = getattr(self._connection, plugin_type)
# network_cli's "real" connection plugin is not named connection
# to avoid the confusion of having connection.connection
if plugin_type == "ssh_type_conn":
plugin_type = "connection"
option_vars = C.config.get_plugin_vars(plugin_type, plugin._load_name)
options = {}
for k in option_vars:
if k in variables:
options[k] = templar.template(variables[k])
# TODO move to task method?
plugin.set_options(task_keys=task_keys, var_options=options)
return option_vars
def _set_connection_options(self, variables, templar):
# keep list of variable names possibly consumed
varnames = []
# grab list of usable vars for this plugin
option_vars = C.config.get_plugin_vars('connection', self._connection._load_name)
varnames.extend(option_vars)
# create dict of 'templated vars'
options = {'_extras': {}}
for k in option_vars:
if k in variables:
options[k] = templar.template(variables[k])
# add extras if plugin supports them
if getattr(self._connection, 'allow_extras', False):
for k in variables:
if k.startswith('ansible_%s_' % self._connection.extras_prefix) and k not in options:
options['_extras'][k] = templar.template(variables[k])
task_keys = self._task.dump_attrs()
# The task_keys 'timeout' attr is the task's timeout, not the connection timeout.
# The connection timeout is threaded through the play_context for now.
task_keys['timeout'] = self._play_context.timeout
if self._play_context.password:
# The connection password is threaded through the play_context for
# now. This is something we ultimately want to avoid, but the first
# step is to get connection plugins pulling the password through the
# config system instead of directly accessing play_context.
task_keys['password'] = self._play_context.password
# Prevent task retries from overriding connection retries
del task_keys['retries']
# set options with 'templated vars' specific to this plugin and dependent ones
self._connection.set_options(task_keys=task_keys, var_options=options)
varnames.extend(self._set_plugin_options('shell', variables, templar, task_keys))
if self._connection.become is not None:
if self._play_context.become_pass:
# FIXME: eventually remove from task and play_context, here for backwards compat
# keep out of play objects to avoid accidental disclosure, only become plugin should have
# The become pass is already in the play_context if given on
# the CLI (-K). Make the plugin aware of it in this case.
task_keys['become_pass'] = self._play_context.become_pass
varnames.extend(self._set_plugin_options('become', variables, templar, task_keys))
# FOR BACKWARDS COMPAT:
for option in ('become_user', 'become_flags', 'become_exe', 'become_pass'):
try:
setattr(self._play_context, option, self._connection.become.get_option(option))
except KeyError:
pass # some plugins don't support all base flags
self._play_context.prompt = self._connection.become.prompt
# deals with networking sub_plugins (network_cli/httpapi/netconf)
sub = getattr(self._connection, '_sub_plugin', None)
if sub and sub.get('type') != 'external':
plugin_type = get_plugin_class(sub.get("obj"))
varnames.extend(self._set_plugin_options(plugin_type, variables, templar, task_keys))
sub_conn = getattr(self._connection, 'ssh_type_conn', None)
if sub_conn is not None:
varnames.extend(self._set_plugin_options("ssh_type_conn", variables, templar, task_keys))
return varnames
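# --- Illustrative sketch, not part of the original source: the vars-to-options
# mapping used above, reduced to its core. Only variables the plugin declares
# (via its config definition) are templated and passed through; the templar is
# faked as the identity function here. Names are hypothetical.
def _sketch_collect_options(option_vars, variables, template=lambda v: v):
    return {k: template(variables[k]) for k in option_vars if k in variables}
# e.g. _sketch_collect_options(['ansible_user', 'ansible_port'], {'ansible_user': 'deploy'})
# -> {'ansible_user': 'deploy'}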
def _get_action_handler(self, templar):
"""
Returns the correct action plugin to handle the requested task action
"""
return self._get_action_handler_with_module_context(templar)[0]
def _get_action_handler_with_module_context(self, templar):
"""
Returns the correct action plugin to handle the requested task action and the module context
"""
module_collection, separator, module_name = self._task.action.rpartition(".")
module_prefix = module_name.split('_')[0]
if module_collection:
# For network modules, which look for one action plugin per platform, look for the
# action plugin in the same collection as the module by prefixing the action plugin
# with the same collection.
network_action = "{0}.{1}".format(module_collection, module_prefix)
else:
network_action = module_prefix
collections = self._task.collections
# Check if the module has specified an action handler
module = self._shared_loader_obj.module_loader.find_plugin_with_context(
self._task.action, collection_list=collections
)
if not module.resolved or not module.action_plugin:
module = None
if module is not None:
handler_name = module.action_plugin
# let action plugin override module, fallback to 'normal' action plugin otherwise
elif self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections):
handler_name = self._task.action
elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))):
handler_name = network_action
display.vvvv("Using network group action {handler} for {action}".format(handler=handler_name,
action=self._task.action),
host=self._play_context.remote_addr)
else:
# use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search
handler_name = 'ansible.legacy.normal'
collections = None # until then, we don't want the task's collection list to be consulted; use the builtin
# networking/persistent connections handling
if any(((self._connection.supports_persistence and C.USE_PERSISTENT_CONNECTIONS), self._connection.force_persistence)):
# check the handler in case we don't need to do all the work to set up a persistent connection
handler_class = self._shared_loader_obj.action_loader.get(handler_name, class_only=True)
if getattr(handler_class, '_requires_connection', True):
# for persistent connections, initialize socket path and start connection manager
self._play_context.timeout = self._connection.get_option('persistent_command_timeout')
display.vvvv('attempting to start connection', host=self._play_context.remote_addr)
display.vvvv('using connection plugin %s' % self._connection.transport, host=self._play_context.remote_addr)
options = self._connection.get_options()
socket_path = start_connection(self._play_context, options, self._task._uuid)
display.vvvv('local domain socket path is %s' % socket_path, host=self._play_context.remote_addr)
setattr(self._connection, '_socket_path', socket_path)
else:
# TODO: set self._connection to dummy/noop connection, using local for now
self._connection = self._get_connection({}, templar, 'local')
handler = self._shared_loader_obj.action_loader.get(
handler_name,
task=self._task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
collection_list=collections
)
if not handler:
raise AnsibleError("the handler '%s' was not found" % handler_name)
return handler, module
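# --- Illustrative sketch, not part of the original source: the handler
# resolution order implemented above, with the plugin loaders replaced by
# plain booleans so the precedence is visible at a glance. Names are hypothetical.
def _sketch_pick_handler(action, module_action_plugin=None, has_action_plugin=False,
                         network_action=None, has_network_action=False):
    if module_action_plugin:  # the module's metadata names its own action plugin
        return module_action_plugin
    if has_action_plugin:  # a dedicated action plugin matches the task action
        return action
    if network_action and has_network_action:  # per-platform network action plugin
        return network_action
    return 'ansible.legacy.normal'  # generic fallback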
CLI_STUB_NAME = 'ansible_connection_cli_stub.py'
def start_connection(play_context, options, task_uuid):
"""
Starts the persistent connection
"""
env = os.environ.copy()
env.update({
# HACK; most of these paths may change during the controller's lifetime
# (eg, due to late dynamic role includes, multi-playbook execution), without a way
# to invalidate/update, the persistent connection helper won't always see the same plugins the controller
# can.
'ANSIBLE_BECOME_PLUGINS': become_loader.print_paths(),
'ANSIBLE_CLICONF_PLUGINS': cliconf_loader.print_paths(),
'ANSIBLE_COLLECTIONS_PATH': to_native(os.pathsep.join(AnsibleCollectionConfig.collection_paths)),
'ANSIBLE_CONNECTION_PLUGINS': connection_loader.print_paths(),
'ANSIBLE_HTTPAPI_PLUGINS': httpapi_loader.print_paths(),
'ANSIBLE_NETCONF_PLUGINS': netconf_loader.print_paths(),
'ANSIBLE_TERMINAL_PLUGINS': terminal_loader.print_paths(),
})
verbosity = []
if display.verbosity:
verbosity.append('-%s' % ('v' * display.verbosity))
if not (cli_stub_path := C.config.get_config_value('_ANSIBLE_CONNECTION_PATH')):
cli_stub_path = str(pathlib.Path(scripts.__file__).parent / CLI_STUB_NAME)
p = subprocess.Popen(
[sys.executable, cli_stub_path, *verbosity, to_text(os.getppid()), to_text(task_uuid)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
)
write_to_stream(p.stdin, options)
write_to_stream(p.stdin, play_context.serialize())
(stdout, stderr) = p.communicate()
if p.returncode == 0:
result = json.loads(to_text(stdout, errors='surrogate_then_replace'))
else:
try:
result = json.loads(to_text(stderr, errors='surrogate_then_replace'))
except json.decoder.JSONDecodeError:
result = {'error': to_text(stderr, errors='surrogate_then_replace')}
if 'messages' in result:
for level, message in result['messages']:
if level == 'log':
display.display(message, log_only=True)
elif level in ('debug', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv'):
getattr(display, level)(message, host=play_context.remote_addr)
else:
if hasattr(display, level):
getattr(display, level)(message)
else:
display.vvvv(message, host=play_context.remote_addr)
if 'error' in result:
if display.verbosity > 2:
if result.get('exception'):
msg = "The full traceback is:\n" + result['exception']
display.display(msg, color=C.COLOR_ERROR)
raise AnsibleError(result['error'])
return result['socket_path']
| 61,280 | Python | .py | 1,082 | 42.529575 | 157 | 0.59561 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,333 | task_result.py | ansible_ansible/lib/ansible/executor/task_result.py |
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible import constants as C
from ansible.parsing.dataloader import DataLoader
from ansible.vars.clean import module_response_deepcopy, strip_internal_keys
_IGNORE = ('failed', 'skipped')
_PRESERVE = ('attempts', 'changed', 'retries')
_SUB_PRESERVE = {'_ansible_delegated_vars': ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection')}
# stuff callbacks need
CLEAN_EXCEPTIONS = (
'_ansible_verbose_always', # for debug and other actions, to always expand data (pretty jsonification)
'_ansible_item_label', # to know actual 'item' variable
'_ansible_no_log', # just in case we didn't clean up well enough, DON'T LOG
'_ansible_verbose_override', # controls display of ansible_facts, gathering would be very noisy with -v otherwise
)
class TaskResult:
"""
This class is responsible for interpreting the resulting data
from an executed task, and provides helper methods for determining
the result of a given task.
"""
def __init__(self, host, task, return_data, task_fields=None):
self._host = host
self._task = task
if isinstance(return_data, dict):
self._result = return_data.copy()
else:
self._result = DataLoader().load(return_data)
if task_fields is None:
self._task_fields = dict()
else:
self._task_fields = task_fields
@property
def task_name(self):
return self._task_fields.get('name', None) or self._task.get_name()
def is_changed(self):
return self._check_key('changed')
def is_skipped(self):
# loop results
if 'results' in self._result:
results = self._result['results']
# Loop tasks are only considered skipped if all items were skipped.
# some squashed results (eg, dnf) are not dicts and can't be skipped individually
if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
return True
# regular tasks and squashed non-dict results
return self._result.get('skipped', False)
def is_failed(self):
if 'failed_when_result' in self._result or \
'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
return self._check_key('failed_when_result')
else:
return self._check_key('failed')
def is_unreachable(self):
return self._check_key('unreachable')
def needs_debugger(self, globally_enabled=False):
_debugger = self._task_fields.get('debugger')
_ignore_errors = C.TASK_DEBUGGER_IGNORE_ERRORS and self._task_fields.get('ignore_errors')
ret = False
if globally_enabled and ((self.is_failed() and not _ignore_errors) or self.is_unreachable()):
ret = True
if _debugger in ('always',):
ret = True
elif _debugger in ('never',):
ret = False
elif _debugger in ('on_failed',) and self.is_failed() and not _ignore_errors:
ret = True
elif _debugger in ('on_unreachable',) and self.is_unreachable():
ret = True
elif _debugger in ('on_skipped',) and self.is_skipped():
ret = True
return ret
def _check_key(self, key):
"""get a specific key from the result or its items"""
if isinstance(self._result, dict) and key in self._result:
return self._result.get(key, False)
else:
flag = False
for res in self._result.get('results', []):
if isinstance(res, dict):
flag |= res.get(key, False)
return flag
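# --- Illustrative sketch, not part of the original source: for loop results
# the key is OR-folded across the per-item dicts, so one changed/failed item
# flips the aggregate flag. A standalone mirror (hypothetical name):
def _sketch_check_key(result, key):
    if key in result:
        return result.get(key, False)
    flag = False
    for res in result.get('results', []):
        if isinstance(res, dict):
            flag |= res.get(key, False)
    return flag
# e.g. _sketch_check_key({'results': [{'changed': False}, {'changed': True}]}, 'changed') -> True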
def clean_copy(self):
""" returns 'clean' taskresult object """
# FIXME: clean task_fields, _task and _host copies
result = TaskResult(self._host, self._task, {}, self._task_fields)
# statuses are already reflected on the event type
if result._task and result._task.action in C._ACTION_DEBUG:
# debug is verbose by default to display vars, no need to add invocation
ignore = _IGNORE + ('invocation',)
else:
ignore = _IGNORE
subset = {}
# preserve subset for later
for sub in _SUB_PRESERVE:
if sub in self._result:
subset[sub] = {}
for key in _SUB_PRESERVE[sub]:
if key in self._result[sub]:
subset[sub][key] = self._result[sub][key]
if isinstance(self._task.no_log, bool) and self._task.no_log or self._result.get('_ansible_no_log', False):
x = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}
# preserve full
for preserve in _PRESERVE:
if preserve in self._result:
x[preserve] = self._result[preserve]
result._result = x
elif self._result:
result._result = module_response_deepcopy(self._result)
# actually remove
for remove_key in ignore:
if remove_key in result._result:
del result._result[remove_key]
# remove almost ALL internal keys, keep ones relevant to callback
strip_internal_keys(result._result, exceptions=CLEAN_EXCEPTIONS)
# keep subset
result._result.update(subset)
return result
| 5,696 | Python | .py | 119 | 38.016807 | 124 | 0.615371 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,334 | playbook_executor.py | ansible_ansible/lib/ansible/executor/playbook_executor.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from ansible import constants as C
from ansible import context
from ansible.executor.task_queue_manager import TaskQueueManager, AnsibleEndPlay
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.loader import become_loader, connection_loader, shell_loader
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import set_default_transport
from ansible.utils.display import Display
display = Display()
class PlaybookExecutor:
"""
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
"""
def __init__(self, playbooks, inventory, variable_manager, loader, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self.passwords = passwords
self._unreachable_hosts = dict()
if context.CLIARGS.get('listhosts') or context.CLIARGS.get('listtasks') or \
context.CLIARGS.get('listtags') or context.CLIARGS.get('syntax'):
self._tqm = None
else:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=self.passwords,
forks=context.CLIARGS.get('forks'),
)
# Note: We run this here to cache whether the default ansible ssh
# executable supports control persist. Sometime in the future we may
# need to enhance this to check that ansible_ssh_executable specified
# in inventory is also cached. We can't do this caching at the point
# where it is used (in task_executor) because that is post-fork and
# therefore would be discarded after every task.
set_default_transport()
def run(self):
"""
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
"""
result = 0
entrylist = []
entry = {}
try:
# preload become/connection/shell to set config defs cached
list(connection_loader.all(class_only=True))
list(shell_loader.all(class_only=True))
list(become_loader.all(class_only=True))
for playbook in self._playbooks:
# deal with FQCN
resource = _get_collection_playbook_path(playbook)
if resource is not None:
playbook_path = resource[1]
playbook_collection = resource[2]
else:
playbook_path = playbook
# not fqcn, but might still be collection playbook
playbook_collection = _get_collection_name_from_path(playbook)
if playbook_collection:
display.v("running playbook inside collection {0}".format(playbook_collection))
AnsibleCollectionConfig.default_collection = playbook_collection
else:
AnsibleCollectionConfig.default_collection = None
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
# FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
# Allow variables to be used in vars_prompt fields.
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
setattr(play, 'vars_prompt', templar.template(play.vars_prompt))
# FIXME: this should be a play 'sub object' like loop_control
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = boolean(var.get("private", True))
confirm = boolean(var.get("confirm", False))
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
unsafe = boolean(var.get("unsafe", False))
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt,
default, unsafe)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Post validate so any play level variables are templated
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
play.post_validate(templar)
if context.CLIARGS['syntax']:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
batches = self._get_serialized_batches(play)
if len(batches) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
for batch in batches:
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
try:
result = self._tqm.run(play=play)
except AnsibleEndPlay as e:
result = e.result
break
# break the play if the result equals the special return code
if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here and break out if the entire batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
if break_play:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
if C.RETRY_FILES_ENABLED:
retries = set(self._tqm._failed_hosts.keys())
retries.update(self._tqm._unreachable_hosts.keys())
retries = sorted(retries)
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.RETRY_FILES_SAVE_PATH
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = '~/'
(retry_name, ext) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
if self._generate_retry_inventory(filename, retries):
display.display("\tto retry, use: --limit @%s\n" % filename)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._tqm.cleanup()
if self._loader:
self._loader.cleanup_all_tmp_files()
if context.CLIARGS['syntax']:
display.display("No issues encountered")
return result
if context.CLIARGS['start_at_task'] and not self._tqm._start_at_done:
display.error(
"No matching task \"%s\" found."
" Note: --start-at-task can only follow static includes."
% context.CLIARGS['start_at_task']
)
return result
def _get_serialized_batches(self, play):
"""
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
"""
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts, order=play.order)
all_hosts_len = len(all_hosts)
# the serial value can be listed as a scalar or a list of
# scalars, so we make sure it's a list here
serial_batch_list = play.serial
if len(serial_batch_list) == 0:
serial_batch_list = [-1]
cur_item = 0
serialized_batches = []
while len(all_hosts) > 0:
# get the serial value from current item in the list
serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise grab a chunk of the hosts equal
# to the current serial item size
if serial <= 0:
serialized_batches.append(all_hosts)
break
else:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
# increment the current batch list item number, and if we've hit
# the end keep using the last element until we've consumed all of
# the hosts in the inventory
cur_item += 1
if cur_item > len(serial_batch_list) - 1:
cur_item = len(serial_batch_list) - 1
return serialized_batches
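# --- Illustrative sketch, not part of the original source: the batch slicing
# above with pct_to_int elided (sizes are already ints here). The last serial
# item is reused until the host list is exhausted. Helper name is hypothetical.
def _sketch_batches(hosts, sizes):
    batches, i = [], 0
    while hosts:
        size = sizes[min(i, len(sizes) - 1)]
        if size <= 0:  # unspecified/invalid serial: one batch with everything
            batches.append(hosts)
            break
        batches.append(hosts[:size])
        hosts = hosts[size:]
        i += 1
    return batches
# e.g. _sketch_batches(list(range(10)), [1, 3]) -> [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]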
def _generate_retry_inventory(self, retry_path, replay_hosts):
"""
Called when a playbook run fails. It generates an inventory which allows
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
"""
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_text(e)))
return False
return True
| 14,764 | Python | .py | 272 | 37.477941 | 152 | 0.550714 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,335 | module_common.py | ansible_ansible/lib/ansible/executor/module_common.py |
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import ast
import base64
import datetime
import json
import os
import shlex
import time
import zipfile
import re
import pkgutil
from ast import AST, Import, ImportFrom
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils.common.json import AnsibleJSONEncoder
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, _nested_dict_get
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
from collections import namedtuple
import importlib.util
import importlib.machinery
display = Display()
ModuleUtilsProcessEntry = namedtuple('ModuleUtilsProcessEntry', ['name_parts', 'is_ambiguous', 'has_redirected_child', 'is_optional'])
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u"""%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For the test-module.py script to tell this is an ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
import os
import os.path
# Access to the working directory is required by Python when using pipelining, as well as for the coverage module.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
try:
os.chdir(os.path.expanduser('~'))
except OSError:
os.chdir('/')
%(rlimit)s
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
# Strip cwd from sys.path to avoid potential permissions issues
excludes = set(('', '.', scriptdir))
sys.path = [p for p in sys.path if p not in excludes]
import base64
import runpy
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
PY3 = False
else:
PY3 = True
ZIPDATA = %(zipdata)r
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = %(date_time)s
z.writestr(zinfo, sitecustomize)
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name=%(module_fqn)r, init_globals=dict(_module_fqn=%(module_fqn)r, _modlib_path=modlib_path),
run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name=%(module_fqn)r, init_globals=None, run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_' + %(ansible_module)r + '_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_' + %(ansible_module)r + '_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
"""
ANSIBALLZ_COVERAGE_TEMPLATE = """
os.environ['COVERAGE_FILE'] = %(coverage_output)r + '=python-%%s=coverage' %% '.'.join(str(v) for v in sys.version_info[:2])
import atexit
try:
import coverage
except ImportError:
print('{"msg": "Could not import `coverage` module.", "failed": true}')
sys.exit(1)
cov = coverage.Coverage(config_file=%(coverage_config)r)
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
"""
ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = """
try:
if PY3:
import importlib.util
if importlib.util.find_spec('coverage') is None:
raise ImportError
else:
import imp
imp.find_module('coverage')
except ImportError:
print('{"msg": "Could not find `coverage` module.", "failed": true}')
sys.exit(1)
"""
ANSIBALLZ_RLIMIT_TEMPLATE = """
import resource
existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# adjust soft limit subject to existing hard limit
requested_soft = min(existing_hard, %(rlimit_nofile)d)
if requested_soft != existing_soft:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
except ValueError:
# some platforms (eg macOS) lie about their hard limit
pass
"""
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
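# --- Hedged usage example, not part of the original source: whole-line
# comments and blank lines are dropped, while trailing comments on code lines
# survive because only the stripped line prefix is tested. Demo name is hypothetical.
_STRIP_DEMO = _strip_comments(u"# header comment\n\nx = 1  # trailing comment\n")
# -> u"x = 1  # trailing comment"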
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py) == site-packages
# Do this instead of getting site-packages from distutils.sysconfig so we work when we
# haven't been installed
site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?P<path>ansible/modules/.*)\.(py|ps1)$' % re.escape(site_packages))
COLLECTION_PATH_RE = re.compile(r'/(?P<path>ansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.(py|ps1)$')
# Detect new-style Python modules by looking for required imports:
# import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util]
# from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util]
# import ansible.module_utils[.basic]
# from ansible.module_utils[ import basic]
# from ansible.module_utils[.basic import AnsibleModule]
# from ..module_utils[ import basic]
# from ..module_utils[.basic import AnsibleModule]
NEW_STYLE_PYTHON_MODULE_RE = re.compile(
# Relative imports
br'(?:from +\.{2,} *module_utils.* +import |'
# Collection absolute imports:
br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |'
br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|'
# Core absolute imports
br'from +ansible\.module_utils.* +import |'
br'import +ansible\.module_utils\.)'
)
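# --- Hedged usage examples, not part of the original source. The regex
# operates on bytes; illustrative probes (inputs assumed for demonstration):
# NEW_STYLE_PYTHON_MODULE_RE.search(b'from ansible.module_utils.basic import AnsibleModule') -> match
# NEW_STYLE_PYTHON_MODULE_RE.search(b'import ansible.module_utils.basic') -> match
# NEW_STYLE_PYTHON_MODULE_RE.search(b'import requests') -> None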
class ModuleDepFinder(ast.NodeVisitor):
def __init__(self, module_fqn, tree, is_pkg_init=False, *args, **kwargs):
"""
Walk the ast tree for the python module.
:arg module_fqn: The fully qualified name to reach this module in dotted notation.
example: ansible.module_utils.basic
:arg is_pkg_init: Inform the finder it's looking at a package init (eg __init__.py) to allow
relative import expansion to use the proper package level without having imported it locally first.
Save submodule[.submoduleN][.identifier] into self.submodules
when they are from ansible.module_utils or ansible_collections packages
self.submodules will end up with tuples like:
- ('ansible', 'module_utils', 'basic',)
- ('ansible', 'module_utils', 'urls', 'fetch_url')
- ('ansible', 'module_utils', 'database', 'postgres')
- ('ansible', 'module_utils', 'database', 'postgres', 'quote')
- ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo')
It's up to calling code to determine whether the final element of the
tuple are module names or something else (function, class, or variable names)
.. seealso:: :python3:class:`ast.NodeVisitor`
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self._tree = tree # squirrel this away so we can compare node parents to it
self.submodules = set()
self.optional_imports = set()
self.module_fqn = module_fqn
self.is_pkg_init = is_pkg_init
self._visit_map = {
Import: self.visit_Import,
ImportFrom: self.visit_ImportFrom,
}
self.visit(tree)
def generic_visit(self, node):
"""Overridden ``generic_visit`` that makes some assumptions about our
use case, and improves performance by calling visitors directly instead
of calling ``visit`` to offload calling visitors.
"""
generic_visit = self.generic_visit
visit_map = self._visit_map
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, (Import, ImportFrom)):
item.parent = node
visit_map[item.__class__](item)
elif isinstance(item, AST):
generic_visit(item)
visit = generic_visit
def visit_Import(self, node):
"""
Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
We save these as interesting submodules when the imported library is in ansible.module_utils
or ansible.collections
"""
for alias in node.names:
if (alias.name.startswith('ansible.module_utils.') or
alias.name.startswith('ansible_collections.')):
py_mod = tuple(alias.name.split('.'))
self.submodules.add(py_mod)
# if the import's parent is the root document, it's a required import, otherwise it's optional
if node.parent != self._tree:
self.optional_imports.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
"""
Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname]
Also has to handle relative imports
We save these as interesting submodules when the imported library is in ansible.module_utils
or ansible.collections
"""
# FIXME: These should all get skipped:
# from ansible.executor import module_common
# from ...executor import module_common
# from ... import executor (Currently it gives a non-helpful error)
if node.level > 0:
# if we're in a package init, we have to add one to the node level (and make it none if 0 to preserve the right slicing behavior)
level_slice_offset = -node.level + 1 or None if self.is_pkg_init else -node.level
if self.module_fqn:
parts = tuple(self.module_fqn.split('.'))
if node.module:
# relative import: from .module import x
node_module = '.'.join(parts[:level_slice_offset] + (node.module,))
else:
# relative import: from . import x
node_module = '.'.join(parts[:level_slice_offset])
else:
# fall back to an absolute import
node_module = node.module
else:
# absolute import: from module import x
node_module = node.module
# Special case: six needs dedicated handling because of its
# import logic
py_mod = None
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node_module.startswith('ansible.module_utils'):
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
# from ansible.module_utils import MODULE1 [,MODULEn] [as asname]
py_mod = tuple(node_module.split('.'))
elif node_module.startswith('ansible_collections.'):
if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module:
# from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname]
# from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname]
# FIXME: Unhandled cornercase (needs to be ignored):
# from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER
py_mod = tuple(node_module.split('.'))
else:
# Not from module_utils so ignore. for instance:
# from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER
pass
if py_mod:
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
# if the import's parent is the root document, it's a required import, otherwise it's optional
if node.parent != self._tree:
self.optional_imports.add(py_mod + (alias.name,))
self.generic_visit(node)
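# A minimal illustrative sketch (not part of the upstream source) of how the
# finder above is driven: compile module source to an AST, hand it to
# ModuleDepFinder, then inspect submodules/optional_imports. The module names
# in demo_src are examples only.
def _demo_module_dep_finder():  # pragma: no cover
    demo_src = (
        "import ansible.module_utils.basic\n"
        "try:\n"
        "    from ansible.module_utils.common.text import converters\n"
        "except ImportError:\n"
        "    pass\n"
    )
    demo_tree = compile(demo_src, '<demo>', 'exec', ast.PyCF_ONLY_AST)
    finder = ModuleDepFinder('ansible.modules.demo', demo_tree)
    # the top-level import is recorded as a required submodule
    assert ('ansible', 'module_utils', 'basic') in finder.submodules
    # the try/except-wrapped import is additionally flagged as optional
    assert ('ansible', 'module_utils', 'common', 'text', 'converters') in finder.optional_imports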
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
with open(path, 'rb') as fd:
data = fd.read()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=False):
"""
Handles the different ways ansible allows overriding the shebang target for a module.
"""
# FUTURE: add logical equivalence for python3 in the case of py3-only modules
interpreter_name = os.path.basename(interpreter).strip()
# name for interpreter var
interpreter_config = u'ansible_%s_interpreter' % interpreter_name
# key for config
interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
interpreter_out = None
    # python gets the interpreter-discovery handling below; the rest rely on matching vars
if interpreter_name == 'python':
# skip detection for network os execution, use playbook supplied one if possible
if remote_is_local:
interpreter_out = task_vars['ansible_playbook_python']
# a config def exists for this interpreter type; consult config for the value
elif C.config.get_configuration_definition(interpreter_config_key):
interpreter_from_config = C.config.get_config_value(interpreter_config_key, variables=task_vars)
interpreter_out = templar.template(interpreter_from_config.strip())
# handle interpreter discovery if requested or empty interpreter was provided
if not interpreter_out or interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
facts_from_task_vars = task_vars.get('ansible_facts', {})
if discovered_interpreter_config not in facts_from_task_vars:
# interpreter discovery is desired, but has not been run for this host
raise InterpreterDiscoveryRequiredError("interpreter discovery needed", interpreter_name=interpreter_name, discovery_mode=interpreter_out)
else:
interpreter_out = facts_from_task_vars[discovered_interpreter_config]
else:
raise InterpreterDiscoveryRequiredError("interpreter discovery required", interpreter_name=interpreter_name, discovery_mode='auto_legacy')
elif interpreter_config in task_vars:
# for non python we consult vars for a possible direct override
interpreter_out = templar.template(task_vars.get(interpreter_config).strip())
if not interpreter_out:
        # nothing matched (None), or someone configured an empty string or empty interpreter
interpreter_out = interpreter
# set shebang
shebang = u'#!{0}'.format(interpreter_out)
if args:
shebang = shebang + u' ' + u' '.join(args)
return shebang, interpreter_out
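# A minimal sketch (not part of the upstream source) isolating the shebang
# assembly performed at the end of _get_shebang; the values are hypothetical.
def _demo_shebang_assembly():  # pragma: no cover
    interpreter_out = u'/usr/bin/python3'
    args = (u'-E',)
    shebang = u'#!{0}'.format(interpreter_out)
    if args:
        shebang = shebang + u' ' + u' '.join(args)
    assert shebang == u'#!/usr/bin/python3 -E'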
class ModuleUtilLocatorBase:
def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
self._is_ambiguous = is_ambiguous
# a child package redirection could cause intermediate package levels to be missing, eg
# from ansible.module_utils.x.y.z import foo; if x.y.z.foo is redirected, we may not have packages on disk for
# the intermediate packages x.y.z, so we'll need to supply empty packages for those
self._child_is_redirected = child_is_redirected
self._is_optional = is_optional
self.found = False
self.redirected = False
self.fq_name_parts = fq_name_parts
self.source_code = ''
self.output_path = ''
self.is_package = False
self._collection_name = None
# for ambiguous imports, we should only test for things more than one level below module_utils
# this lets us detect erroneous imports and redirections earlier
if is_ambiguous and len(self._get_module_utils_remainder_parts(fq_name_parts)) > 1:
self.candidate_names = [fq_name_parts, fq_name_parts[:-1]]
else:
self.candidate_names = [fq_name_parts]
@property
def candidate_names_joined(self):
return ['.'.join(n) for n in self.candidate_names]
def _handle_redirect(self, name_parts):
module_utils_relative_parts = self._get_module_utils_remainder_parts(name_parts)
# only allow redirects from below module_utils- if above that, bail out (eg, parent package names)
if not module_utils_relative_parts:
return False
try:
collection_metadata = _get_collection_metadata(self._collection_name)
except ValueError as ve: # collection not found or some other error related to collection load
if self._is_optional:
return False
raise AnsibleError('error processing module_util {0} loading redirected collection {1}: {2}'
.format('.'.join(name_parts), self._collection_name, to_native(ve)))
routing_entry = _nested_dict_get(collection_metadata, ['plugin_routing', 'module_utils', '.'.join(module_utils_relative_parts)])
if not routing_entry:
return False
# FIXME: add deprecation warning support
dep_or_ts = routing_entry.get('tombstone')
removed = dep_or_ts is not None
if not removed:
dep_or_ts = routing_entry.get('deprecation')
if dep_or_ts:
removal_date = dep_or_ts.get('removal_date')
removal_version = dep_or_ts.get('removal_version')
warning_text = dep_or_ts.get('warning_text')
msg = 'module_util {0} has been removed'.format('.'.join(name_parts))
if warning_text:
msg += ' ({0})'.format(warning_text)
else:
msg += '.'
display.deprecated(msg, removal_version, removed, removal_date, self._collection_name)
if 'redirect' in routing_entry:
self.redirected = True
source_pkg = '.'.join(name_parts)
self.is_package = True # treat all redirects as packages
redirect_target_pkg = routing_entry['redirect']
# expand FQCN redirects
if not redirect_target_pkg.startswith('ansible_collections'):
split_fqcn = redirect_target_pkg.split('.')
if len(split_fqcn) < 3:
raise Exception('invalid redirect for {0}: {1}'.format(source_pkg, redirect_target_pkg))
# assume it's an FQCN, expand it
redirect_target_pkg = 'ansible_collections.{0}.{1}.plugins.module_utils.{2}'.format(
split_fqcn[0], # ns
split_fqcn[1], # coll
'.'.join(split_fqcn[2:]) # sub-module_utils remainder
)
display.vvv('redirecting module_util {0} to {1}'.format(source_pkg, redirect_target_pkg))
self.source_code = self._generate_redirect_shim_source(source_pkg, redirect_target_pkg)
return True
return False
def _get_module_utils_remainder_parts(self, name_parts):
# subclasses should override to return the name parts after module_utils
return []
def _get_module_utils_remainder(self, name_parts):
# return the remainder parts as a package string
return '.'.join(self._get_module_utils_remainder_parts(name_parts))
def _find_module(self, name_parts):
return False
def _locate(self, redirect_first=True):
for candidate_name_parts in self.candidate_names:
if redirect_first and self._handle_redirect(candidate_name_parts):
break
if self._find_module(candidate_name_parts):
break
if not redirect_first and self._handle_redirect(candidate_name_parts):
break
else: # didn't find what we were looking for- last chance for packages whose parents were redirected
if self._child_is_redirected: # make fake packages
self.is_package = True
self.source_code = ''
else: # nope, just bail
return
if self.is_package:
path_parts = candidate_name_parts + ('__init__',)
else:
path_parts = candidate_name_parts
self.found = True
self.output_path = os.path.join(*path_parts) + '.py'
self.fq_name_parts = candidate_name_parts
def _generate_redirect_shim_source(self, fq_source_module, fq_target_module):
return """
import sys
import {1} as mod
sys.modules['{0}'] = mod
""".format(fq_source_module, fq_target_module)
# FIXME: add __repr__ impl
class LegacyModuleUtilLocator(ModuleUtilLocatorBase):
def __init__(self, fq_name_parts, is_ambiguous=False, mu_paths=None, child_is_redirected=False):
super(LegacyModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected)
if fq_name_parts[0:2] != ('ansible', 'module_utils'):
raise Exception('this class can only locate from ansible.module_utils, got {0}'.format(fq_name_parts))
if fq_name_parts[2] == 'six':
# FIXME: handle the ansible.module_utils.six._six case with a redirect or an internal _six attr on six itself?
# six creates its submodules at runtime; convert all these to just 'ansible.module_utils.six'
fq_name_parts = ('ansible', 'module_utils', 'six')
self.candidate_names = [fq_name_parts]
self._mu_paths = mu_paths
self._collection_name = 'ansible.builtin' # legacy module utils always look in ansible.builtin for redirects
self._locate(redirect_first=False) # let local stuff override redirects for legacy
def _get_module_utils_remainder_parts(self, name_parts):
return name_parts[2:] # eg, foo.bar for ansible.module_utils.foo.bar
def _find_module(self, name_parts):
rel_name_parts = self._get_module_utils_remainder_parts(name_parts)
# no redirection; try to find the module
if len(rel_name_parts) == 1: # direct child of module_utils, just search the top-level dirs we were given
paths = self._mu_paths
else: # a nested submodule of module_utils, extend the paths given with the intermediate package names
paths = [os.path.join(p, *rel_name_parts[:-1]) for p in
self._mu_paths] # extend the MU paths with the relative bit
# find_spec needs the full module name
self._info = info = importlib.machinery.PathFinder.find_spec('.'.join(name_parts), paths)
if info is not None and os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES:
self.is_package = info.origin.endswith('/__init__.py')
path = info.origin
else:
return False
self.source_code = _slurp(path)
return True
class CollectionModuleUtilLocator(ModuleUtilLocatorBase):
def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
super(CollectionModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected, is_optional)
if fq_name_parts[0] != 'ansible_collections':
raise Exception('CollectionModuleUtilLocator can only locate from ansible_collections, got {0}'.format(fq_name_parts))
elif len(fq_name_parts) >= 6 and fq_name_parts[3:5] != ('plugins', 'module_utils'):
raise Exception('CollectionModuleUtilLocator can only locate below ansible_collections.(ns).(coll).plugins.module_utils, got {0}'
.format(fq_name_parts))
self._collection_name = '.'.join(fq_name_parts[1:3])
self._locate()
def _find_module(self, name_parts):
# synthesize empty inits for packages down through module_utils- we don't want to allow those to be shipped over, but the
# package hierarchy needs to exist
if len(name_parts) < 6:
self.source_code = ''
self.is_package = True
return True
# NB: we can't use pkgutil.get_data safely here, since we don't want to import/execute package/module code on
# the controller while analyzing/assembling the module, so we'll have to manually import the collection's
# Python package to locate it (import root collection, reassemble resource path beneath, fetch source)
collection_pkg_name = '.'.join(name_parts[0:3])
resource_base_path = os.path.join(*name_parts[3:])
src = None
# look for package_dir first, then module
try:
src = pkgutil.get_data(collection_pkg_name, to_native(os.path.join(resource_base_path, '__init__.py')))
except ImportError:
pass
# TODO: we might want to synthesize fake inits for py3-style packages, for now they're required beneath module_utils
if src is not None: # empty string is OK
self.is_package = True
else:
try:
src = pkgutil.get_data(collection_pkg_name, to_native(resource_base_path + '.py'))
except ImportError:
pass
if src is None: # empty string is OK
return False
self.source_code = src
return True
def _get_module_utils_remainder_parts(self, name_parts):
return name_parts[5:] # eg, foo.bar for ansible_collections.ns.coll.plugins.module_utils.foo.bar
def _make_zinfo(filename, date_time, zf=None):
zinfo = zipfile.ZipInfo(
filename=filename,
date_time=date_time
)
if zf:
zinfo.compress_type = zf.compression
return zinfo
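# A minimal sketch (not part of the upstream source): passing an explicit
# date_time through _make_zinfo keeps archive entries reproducible, and
# copying zf.compression keeps per-entry compression consistent with the
# archive. All values below are hypothetical.
def _demo_make_zinfo():  # pragma: no cover
    demo_buf = BytesIO()
    with zipfile.ZipFile(demo_buf, mode='w', compression=zipfile.ZIP_STORED) as demo_zf:
        zinfo = _make_zinfo('demo/__init__.py', (2024, 1, 1, 0, 0, 0), zf=demo_zf)
        demo_zf.writestr(zinfo, b'')
    with zipfile.ZipFile(demo_buf) as demo_zf:
        assert demo_zf.getinfo('demo/__init__.py').date_time == (2024, 1, 1, 0, 0, 0)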
def recursive_finder(name, module_fqn, module_data, zf, date_time=None):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
    the module and its module_utils files need. (no longer actually recursive)
:arg name: Name of the python module we're examining
:arg module_fqn: Fully qualified name of the python module we're scanning
:arg module_data: string Python code of the module we're scanning
:arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
which we're assembling
"""
if date_time is None:
date_time = time.gmtime()[:6]
# py_module_cache maps python module names to a tuple of the code in the module
# and the pathname to the module.
# Here we pre-load it with modules which we create without bothering to
# read from actual files (In some cases, these need to differ from what ansible
# ships because they're namespace packages in the module)
# FIXME: do we actually want ns pkg behavior for these? Seems like they should just be forced to emptyish pkg stubs
py_module_cache = {
('ansible',): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n'
b'__version__="' + to_bytes(__version__) +
b'"\n__author__="' + to_bytes(__author__) + b'"\n',
'ansible/__init__.py'),
('ansible', 'module_utils'): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n',
'ansible/module_utils/__init__.py')}
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
# Parse the module code and find the imports of ansible.module_utils
try:
tree = compile(module_data, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
finder = ModuleDepFinder(module_fqn, tree)
    # each entry records the module name parts and whether or not the import is ambiguous as a module name
    # or an attribute of a module (eg from x.y import z <-- is z a module or an attribute of x.y?)
modules_to_process = [ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules]
# HACK: basic is currently always required since module global init is currently tied up with AnsiballZ arg input
modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False, is_optional=False))
    # we'll be adding new modules inline as we discover them, so just keep going until we've processed them all
while modules_to_process:
modules_to_process.sort() # not strictly necessary, but nice to process things in predictable and repeatable order
py_module_name, is_ambiguous, child_is_redirected, is_optional = modules_to_process.pop(0)
if py_module_name in py_module_cache:
# this is normal; we'll often see the same module imported many times, but we only need to process it once
continue
if py_module_name[0:2] == ('ansible', 'module_utils'):
module_info = LegacyModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
mu_paths=module_utils_paths, child_is_redirected=child_is_redirected)
elif py_module_name[0] == 'ansible_collections':
module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
child_is_redirected=child_is_redirected, is_optional=is_optional)
else:
# FIXME: dot-joined result
display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
% [py_module_name])
continue
# Could not find the module. Construct a helpful error message.
if not module_info.found:
if is_optional:
# this was a best-effort optional import that we couldn't find, oh well, move along...
continue
# FIXME: use dot-joined candidate names
msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
raise AnsibleError(msg)
# check the cache one more time with the module we actually found, since the name could be different than the input
# eg, imported name vs module
if module_info.fq_name_parts in py_module_cache:
continue
# compile the source, process all relevant imported modules
try:
tree = compile(module_info.source_code, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (module_info.fq_name_parts, e.msg))
finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), tree, module_info.is_package)
modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports)
for m in finder.submodules if m not in py_module_cache)
# we've processed this item, add it to the output list
py_module_cache[module_info.fq_name_parts] = (module_info.source_code, module_info.output_path)
# ensure we process all ancestor package inits
accumulated_pkg_name = []
for pkg in module_info.fq_name_parts[:-1]:
accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
normalized_name = tuple(accumulated_pkg_name) # extra machinations to get a hashable type (list is not)
if normalized_name not in py_module_cache:
modules_to_process.append(ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=is_optional))
for py_module_name in py_module_cache:
py_module_file_name = py_module_cache[py_module_name][1]
zf.writestr(
_make_zinfo(py_module_file_name, date_time, zf=zf),
py_module_cache[py_module_name][0]
)
mu_file = to_text(py_module_file_name, errors='surrogate_or_strict')
display.vvvvv("Including module_utils file %s" % mu_file)
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
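# A minimal sketch (not part of the upstream source) of the heuristic above:
# data is treated as binary when its first kilobyte contains bytes outside
# the whitelist of common text bytes.
def _demo_is_binary():  # pragma: no cover
    assert not _is_binary(b'#!/usr/bin/python\nprint("hi")\n')  # ordinary text
    assert _is_binary(b'\x7fELF\x02\x01\x01\x00')  # leading bytes of an ELF executable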
def _get_ansible_module_fqn(module_path):
"""
Get the fully qualified name for an ansible module based on its pathname
    remote_module_fqn is the fully qualified name, like ansible.modules.system.ping
    or ansible_collections.Namespace.Collection_name.plugins.modules.ping
.. warning:: This function is for ansible modules only. It won't work for other things
(non-module plugins, etc)
"""
remote_module_fqn = None
# Is this a core module?
match = CORE_LIBRARY_PATH_RE.search(module_path)
if not match:
# Is this a module in a collection?
match = COLLECTION_PATH_RE.search(module_path)
# We can tell the FQN for core modules and collection modules
if match:
path = match.group('path')
if '.' in path:
# FQNs must be valid as python identifiers. This sanity check has failed.
# we could check other things as well
raise ValueError('Module name (or path) was not a valid python identifier')
remote_module_fqn = '.'.join(path.split('/'))
else:
# Currently we do not handle modules in roles so we can end up here for that reason
raise ValueError("Unable to determine module's fully qualified name")
return remote_module_fqn
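# A minimal sketch (not part of the upstream source): once one of the regexes
# referenced above captures the 'path' group, the FQN is simply that path with
# slashes replaced by dots. The path below is a hypothetical matched group.
def _demo_fqn_join():  # pragma: no cover
    demo_path = 'ansible/modules/ping'  # hypothetical match.group('path')
    assert '.'.join(demo_path.split('/')) == 'ansible.modules.ping'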
def _add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data):
"""Add a module from ansible or from an ansible collection into the module zip"""
module_path_parts = remote_module_fqn.split('.')
# Write the module
module_path = '/'.join(module_path_parts) + '.py'
zf.writestr(
_make_zinfo(module_path, date_time, zf=zf),
b_module_data
)
# Write the __init__.py's necessary to get there
if module_path_parts[0] == 'ansible':
# The ansible namespace is setup as part of the module_utils setup...
start = 2
existing_paths = frozenset()
else:
# ... but ansible_collections and other toplevels are not
start = 1
existing_paths = frozenset(zf.namelist())
for idx in range(start, len(module_path_parts)):
package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py'
# If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder.
if package_path in existing_paths:
continue
# Note: We don't want to include more than one ansible module in a payload at this time
# so no need to fill the __init__.py with namespace code
zf.writestr(
_make_zinfo(package_path, date_time, zf=zf),
b''
)
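# A minimal sketch (not part of the upstream source) of the __init__.py path
# generation in the loop above, for a hypothetical collection module FQN
# (start=1, since ansible_collections is not pre-seeded like 'ansible').
def _demo_package_init_paths():  # pragma: no cover
    parts = 'ansible_collections.ns.coll.plugins.modules.ping'.split('.')
    init_paths = ['/'.join(parts[:idx]) + '/__init__.py' for idx in range(1, len(parts))]
    assert init_paths[0] == 'ansible_collections/__init__.py'
    assert init_paths[-1] == 'ansible_collections/ns/coll/plugins/modules/__init__.py'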
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment, remote_is_local=False):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
        # Do REPLACER before from ansible.module_utils because we need to make sure
        # we substitute "from ansible.module_utils.basic import *" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data):
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
try:
remote_module_fqn = _get_ansible_module_fqn(module_path)
except ValueError:
# Modules in roles currently are not found by the fqn heuristic so we
        # fall back to this. This means that relative imports inside a module from
        # a role may fail. Absolute imports should be used for future-proofing.
# People should start writing collections instead of modules in roles so we
# may never fix this
display.debug('ANSIBALLZ: Could not determine module FQN')
remote_module_fqn = 'ansible.modules.%s' % module_name
if module_substyle == 'python':
date_time = time.gmtime()[:6]
if date_time[0] < 1980:
date_string = datetime.datetime(*date_time, tzinfo=datetime.timezone.utc).strftime('%c')
raise AnsibleError(f'Cannot create zipfile due to pre-1980 configured date: {date_string}')
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
try:
python_repred_params = repr(json.dumps(params, cls=AnsibleJSONEncoder, vault_to_text=True))
except TypeError as e:
raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (remote_module_fqn, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
with open(cached_module_filename, 'rb') as module_data:
zipdata = module_data.read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
                # lock specifically for this module. Use the "unexpected
                # module" lock instead.
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# walk the module imports, looking for module_utils to send- they'll be added to the zipfile
recursive_finder(module_name, remote_module_fqn, b_module_data, zf, date_time)
display.debug('ANSIBALLZ: Writing module into payload')
_add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
#
# FIXME: Once split controller/remote is merged, this can be simplified to
# os.makedirs(lookup_path, exist_ok=True)
if not os.path.exists(lookup_path):
try:
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
except OSError:
# Multiple processes tried to create the directory. If it still does not
# exist, raise the original exception.
if not os.path.exists(lookup_path):
raise
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
with open(cached_module_filename, 'rb') as f:
zipdata = f.read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
o_interpreter, o_args = _extract_interpreter(b_module_data)
if o_interpreter is None:
o_interpreter = u'/usr/bin/python'
shebang, interpreter = _get_shebang(o_interpreter, task_vars, templar, o_args, remote_is_local=remote_is_local)
# FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
if not isinstance(rlimit_nofile, int):
rlimit_nofile = int(templar.template(rlimit_nofile))
if rlimit_nofile:
rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
rlimit_nofile=rlimit_nofile,
)
else:
rlimit = ''
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']
if coverage_output:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=coverage_output,
)
else:
# Verify coverage is available without importing it.
# This will detect when a module would fail with coverage enabled with minimal overhead.
coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
else:
coverage = ''
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
module_fqn=remote_module_fqn,
params=python_repred_params,
shebang=shebang,
coding=ENCODING_STRING,
date_time=date_time,
coverage=coverage,
rlimit=rlimit,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
        # it can fail in the presence of the UTF-8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
b_module_data, module_path, module_args, environment,
async_timeout, become, become_method, become_user, become_password,
become_flags, module_substyle, task_vars, remote_module_fqn
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args, cls=AnsibleJSONEncoder, vault_to_text=True))
        # These strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ansiballz). If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def _extract_interpreter(b_module_data):
"""
Used to extract shebang expression from binary module data and return a text
string with the shebang, or None if no shebang is detected.
"""
interpreter = None
args = []
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split needs text on Python 3
cli_split = shlex.split(to_text(b_shebang[2:], errors='surrogate_or_strict'))
# convert args to text
cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split]
interpreter = cli_split[0]
args = cli_split[1:]
return interpreter, args
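# A minimal sketch (not part of the upstream source) of _extract_interpreter's
# behavior on hypothetical inputs.
def _demo_extract_interpreter():  # pragma: no cover
    interpreter, args = _extract_interpreter(b'#!/usr/bin/env python3 -u\nprint("hi")\n')
    assert interpreter == u'/usr/bin/env'
    assert args == [u'python3', u'-u']
    assert _extract_interpreter(b'no shebang here\n') == (None, [])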
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None, remote_is_local=False):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment, remote_is_local=remote_is_local)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
interpreter, args = _extract_interpreter(b_module_data)
        # No interpreter/shebang found; assume a binary module and leave it unmodified
if interpreter is not None:
shebang, new_interpreter = _get_shebang(interpreter, task_vars, templar, args, remote_is_local=remote_is_local)
# update shebang
b_lines = b_module_data.split(b"\n", 1)
if interpreter != new_interpreter:
b_lines[0] = to_bytes(shebang, errors='surrogate_or_strict', nonstring='passthru')
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
def get_action_args_with_defaults(action, args, defaults, templar, action_groups=None):
# Get the list of groups that contain this action
if action_groups is None:
msg = (
"Finding module_defaults for action %s. "
"The caller has not passed the action_groups, so any "
"that may include this action will be ignored."
)
display.warning(msg=msg)
group_names = []
else:
group_names = action_groups.get(action, [])
tmp_args = {}
module_defaults = {}
# Merge latest defaults into dict, since they are a list of dicts
if isinstance(defaults, list):
for default in defaults:
module_defaults.update(default)
# module_defaults keys are static, but the values may be templated
module_defaults = templar.template(module_defaults)
for default in module_defaults:
if default.startswith('group/'):
group_name = default.split('group/')[-1]
if group_name in group_names:
tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())
# handle specific action defaults
tmp_args.update(module_defaults.get(action, {}).copy())
# direct args override all
tmp_args.update(args)
return tmp_args
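# A minimal sketch (not part of the upstream source) of the precedence the
# function above implements, using plain dict updates with hypothetical values:
# group defaults apply first, then action-specific defaults, then the task's
# direct args override everything.
def _demo_defaults_precedence():  # pragma: no cover
    tmp_args = {}
    tmp_args.update({'region': 'us-east-1', 'retries': 3})  # group/<name> defaults
    tmp_args.update({'retries': 5})                         # action-specific defaults
    tmp_args.update({'retries': 10})                        # direct args win
    assert tmp_args == {'region': 'us-east-1', 'retries': 10}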
| 65,755 | Python | .py | 1,186 | 45.597808 | 158 | 0.649 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,336 | __init__.py | ansible_ansible/lib/ansible/executor/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
| 749 | Python | .py | 17 | 43 | 70 | 0.77565 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,337 | action_write_locks.py | ansible_ansible/lib/ansible/executor/action_write_locks.py |
# (c) 2016 - Red Hat, Inc. <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import multiprocessing.synchronize
from multiprocessing import Lock
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
if 'action_write_locks' not in globals():
    # Do not initialize this more than once because doing so appears to clobber
    # the existing one; multiprocessing may be reloading the module when it forks.
action_write_locks: dict[str | None, multiprocessing.synchronize.Lock] = dict()
# Below is a Lock for use when we weren't expecting a named module. It gets used when an action
# plugin invokes a module whose name does not match with the action's name. Slightly less
# efficient as all processes with unexpected module names will wait on this lock
action_write_locks[None] = Lock()
# These plugins are known to be called directly by action plugins with names differing from the
# action plugin name. We precreate them here as an optimization.
# If a list of service managers is created in the future we can do the same for them.
mods = set(p['name'] for p in PKG_MGRS)
mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
for mod_name in mods:
action_write_locks[mod_name] = Lock()
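# A minimal sketch (not part of the upstream source) of the intended lookup
# pattern for callers: use the module's named lock when one was pre-created
# above, otherwise fall back to the shared action_write_locks[None] lock.
def _demo_lock_lookup():  # pragma: no cover
    lock = action_write_locks.get('copy', action_write_locks[None])
    with lock:
        pass  # critical section, eg writing a cached module file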
| 1,915 | Python | .py | 36 | 50.194444 | 100 | 0.747728 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,338 | play_iterator.py | ansible_ansible/lib/ansible/executor/play_iterator.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import fnmatch
from enum import IntEnum, IntFlag
from ansible import constants as C
from ansible.errors import AnsibleAssertionError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator', 'IteratingStates', 'FailedStates']
class IteratingStates(IntEnum):
SETUP = 0
TASKS = 1
RESCUE = 2
ALWAYS = 3
HANDLERS = 4
COMPLETE = 5
class FailedStates(IntFlag):
NONE = 0
SETUP = 1
TASKS = 2
RESCUE = 4
ALWAYS = 8
HANDLERS = 16 # NOTE not in use anymore
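# A minimal sketch (not part of the upstream source): FailedStates is an
# IntFlag, so multiple failure sources combine into one field and are tested
# with bitwise operators, as the iterator code below does.
def _demo_failed_states():  # pragma: no cover
    fail_state = FailedStates.NONE
    fail_state |= FailedStates.TASKS
    fail_state |= FailedStates.RESCUE
    assert fail_state & FailedStates.RESCUE == FailedStates.RESCUE
    assert not (fail_state & FailedStates.ALWAYS)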
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.handlers = []
self.handler_notifications = []
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_handlers_task = 0
self.run_state = IteratingStates.SETUP
self.fail_state = FailedStates.NONE
self.pre_flushing_run_state = None
self.update_handlers = True
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, handlers=%d, run_state=%s, fail_state=%s, "
"pre_flushing_run_state=%s, update_handlers=%s, pending_setup=%s, "
"tasks child state? (%s), rescue child state? (%s), always child state? (%s), "
"did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.cur_handlers_task,
self.run_state,
self.fail_state,
self.pre_flushing_run_state,
self.update_handlers,
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks',
'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_handlers_task',
'run_state', 'fail_state', 'pre_flushing_run_state', 'update_handlers', 'pending_setup',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.handlers = self.handlers[:]
new_state.handler_notifications = self.handler_notifications[:]
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.cur_handlers_task = self.cur_handlers_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pre_flushing_run_state = self.pre_flushing_run_state
new_state.update_handlers = self.update_handlers
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
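# A minimal sketch (not part of the upstream source): copy() duplicates the
# mutable cursor/handler fields so callers (eg the iterator's peek-ahead) can
# mutate the copy without disturbing the stored state.
def _demo_host_state_copy():  # pragma: no cover
    state = HostState(blocks=[])
    peeked = state.copy()
    peeked.cur_regular_task += 1
    assert state.cur_regular_task == 0  # the original is untouched
    assert state != peeked              # __eq__ compares the cursor fields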
class PlayIterator:
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
# TODO: hardcoded resolution here, but should use actual resolution code in the end,
# in case of 'legacy' mismatch
setup_task.resolved_action = 'ansible.builtin.gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
# Default options to gather
for option in ('gather_subset', 'gather_timeout', 'fact_path'):
value = getattr(self._play, option, None)
if value is not None:
setup_task.args[option] = value
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(all_vars)
self._blocks.append(setup_block)
        # keep a flattened (no blocks) list of all tasks from the play
# used for the lockstep mechanism in the linear strategy
self.all_tasks = setup_block.get_tasks()
for block in self._play.compile():
new_block = block.filter_tagged_tasks(all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self.all_tasks.extend(new_block.get_tasks())
# keep list of all handlers, it is copied into each HostState
# at the beginning of IteratingStates.HANDLERS
# the copy happens at each flush in order to restore the original
# list and remove any included handlers that might not be notified
# at the particular flush
self.handlers = [h for b in self._play.handlers for h in b.block]
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
self.batch_size = len(batch)
for host in batch:
self.set_state_for_host(host.name, HostState(blocks=self._blocks))
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == IteratingStates.COMPLETE:
break
if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
self.set_state_for_host(host.name, s)
# finally, reset the host's state to IteratingStates.SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = IteratingStates.SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
self.end_play = False
self.cur_task = 0
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self.set_state_for_host(host.name, HostState(blocks=[]))
return self._host_states[host.name].copy()
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == IteratingStates.COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host)
if not peek:
self.set_state_for_host(host.name, s)
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host):
task = None
        # try to find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = IteratingStates.COMPLETE
return (state, None)
if state.run_state == IteratingStates.SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through IteratingStates.SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through IteratingStates.SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to IteratingStates.TASKS
state.pending_setup = False
state.run_state = IteratingStates.TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
elif state.run_state == IteratingStates.TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = IteratingStates.RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = IteratingStates.ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block):
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = IteratingStates.TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == IteratingStates.RESCUE:
# The process here is identical to IteratingStates.TASKS, except instead
# we move into the always portion of the block.
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE:
state.run_state = IteratingStates.ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = FailedStates.NONE
state.run_state = IteratingStates.ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block):
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == IteratingStates.ALWAYS:
# And again, the process here is identical to IteratingStates.TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to IteratingStates.COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != FailedStates.NONE:
state.run_state = IteratingStates.COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = IteratingStates.TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_always_task += 1
elif state.run_state == IteratingStates.HANDLERS:
if state.update_handlers:
# reset handlers for HostState since handlers from include_tasks
# might be there from previous flush
state.handlers = self.handlers[:]
state.update_handlers = False
while True:
try:
task = state.handlers[state.cur_handlers_task]
except IndexError:
task = None
state.cur_handlers_task = 0
state.run_state = state.pre_flushing_run_state
state.update_handlers = True
break
else:
state.cur_handlers_task += 1
if task.is_host_notified(host):
return state, task
elif state.run_state == IteratingStates.COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
# skip implicit flush_handlers if there are no handlers notified
if (
task.implicit
and task.action in C._ACTION_META
and task.args.get('_raw_params', None) == 'flush_handlers'
and (
# the state store in the `state` variable could be a nested state,
# notifications are always stored in the top level state, get it here
not self.get_state_for_host(host.name).handler_notifications
# in case handlers notifying other handlers, the notifications are not
# saved in `handler_notifications` and handlers are notified directly
# to prevent duplicate handler runs, so check whether any handler
# is notified
and all(not h.notified_hosts for h in self.handlers)
)
):
display.debug("No handler notifications for %s, skipping." % host.name)
elif (
(role := task._role)
and role._metadata.allow_duplicates is False
and host.name in self._play._get_cached_role(role)._completed
):
display.debug("'%s' skipped because role has already run" % task)
else:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == IteratingStates.SETUP:
state.fail_state |= FailedStates.SETUP
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= FailedStates.TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= FailedStates.RESCUE
if state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= FailedStates.ALWAYS
state.run_state = IteratingStates.COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
if s.run_state == IteratingStates.HANDLERS:
# we are failing `meta: flush_handlers`, so just reset the state to whatever
# it was before and let `_set_failed_state` figure out the next state
s.run_state = s.pre_flushing_run_state
s.update_handlers = True
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self.set_state_for_host(host.name, s)
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in self._host_states.items() if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == IteratingStates.RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != FailedStates.NONE:
if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0:
return False
elif state.run_state == IteratingStates.ALWAYS and state.fail_state & FailedStates.ALWAYS == 0:
return False
else:
return not (state.did_rescue and state.fail_state & FailedStates.ALWAYS == 0)
elif state.run_state == IteratingStates.TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = state._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & FailedStates.RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def clear_host_errors(self, host):
self._clear_state_errors(self.get_state_for_host(host.name))
def _clear_state_errors(self, state: HostState) -> None:
state.fail_state = FailedStates.NONE
if state.tasks_child_state is not None:
self._clear_state_errors(state.tasks_child_state)
elif state.rescue_child_state is not None:
self._clear_state_errors(state.rescue_child_state)
elif state.always_child_state is not None:
self._clear_state_errors(state.always_child_state)
def get_active_state(self, state):
"""
Finds the active state, recursively if necessary when there are child states.
"""
if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == IteratingStates.ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def is_any_block_rescuing(self, state):
"""
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
"""
if state.run_state == IteratingStates.TASKS and state.get_current_block().rescue:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
if state.rescue_child_state is not None:
return self.is_any_block_rescuing(state.rescue_child_state)
if state.always_child_state is not None:
return self.is_any_block_rescuing(state.always_child_state)
return False
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if (state.fail_state != FailedStates.NONE and state.run_state == IteratingStates.TASKS) or not task_list:
return state
if state.run_state == IteratingStates.TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.HANDLERS:
state.handlers[state.cur_handlers_task:state.cur_handlers_task] = [h for b in task_list for h in b.block]
return state
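    # --- Editor's sketch, not upstream code: the splice above is plain list
    # slicing on a copy of the current block. With cur_regular_task == 1:
    #
    #     block = ['t0', 't1', 't2']
    #     block[:1] + ['new'] + block[1:]   # -> ['t0', 'new', 't1', 't2']
    #
    # Copying the block first means other hosts iterating the same play never
    # see the tasks inserted for this host.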
def add_tasks(self, host, task_list):
self.set_state_for_host(host.name, self._insert_tasks_into_state(self.get_host_state(host), task_list))
@property
def host_states(self):
return self._host_states
def get_state_for_host(self, hostname: str) -> HostState:
return self._host_states[hostname]
def set_state_for_host(self, hostname: str, state: HostState) -> None:
if not isinstance(state, HostState):
raise AnsibleAssertionError('Expected state to be a HostState but was a %s' % type(state))
self._host_states[hostname] = state
def set_run_state_for_host(self, hostname: str, run_state: IteratingStates) -> None:
if not isinstance(run_state, IteratingStates):
raise AnsibleAssertionError('Expected run_state to be a IteratingStates but was %s' % (type(run_state)))
self._host_states[hostname].run_state = run_state
def set_fail_state_for_host(self, hostname: str, fail_state: FailedStates) -> None:
if not isinstance(fail_state, FailedStates):
raise AnsibleAssertionError('Expected fail_state to be a FailedStates but was %s' % (type(fail_state)))
self._host_states[hostname].fail_state = fail_state
def add_notification(self, hostname: str, notification: str) -> None:
# preserve order
host_state = self._host_states[hostname]
if notification not in host_state.handler_notifications:
host_state.handler_notifications.append(notification)
def clear_notification(self, hostname: str, notification: str) -> None:
self._host_states[hostname].handler_notifications.remove(notification)
def end_host(self, hostname: str) -> None:
"""Used by ``end_host``, ``end_batch`` and ``end_play`` meta tasks to end executing given host."""
state = self.get_active_state(self.get_state_for_host(hostname))
if state.run_state == IteratingStates.RESCUE:
# This is a special case for when ending a host occurs in rescue.
# By definition the meta task responsible for ending the host
# is the last task, so we need to clear the fail state to mark
# the host as rescued.
# The reason we need to do that is because this operation is
# normally done when PlayIterator transitions from rescue to
# always when only then we can say that rescue didn't fail
# but with ending a host via meta task, we don't get to that transition.
self.set_fail_state_for_host(hostname, FailedStates.NONE)
self.set_run_state_for_host(hostname, IteratingStates.COMPLETE)
self._play._removed_hosts.append(hostname)
| 32,913 | Python | .py | 592 | 40.97973 | 160 | 0.58596 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,339 | stats.py | ansible_ansible/lib/ansible/executor/stats.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from collections.abc import MutableMapping
from ansible.utils.vars import merge_hash
class AggregateStats:
""" holds stats about per-host activity during playbook runs """
def __init__(self):
self.processed = {}
self.failures = {}
self.ok = {}
self.dark = {}
self.changed = {}
self.skipped = {}
self.rescued = {}
self.ignored = {}
# user defined stats, which can be per host or global
self.custom = {}
def increment(self, what, host):
""" helper function to bump a statistic """
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev + 1
def decrement(self, what, host):
_what = getattr(self, what)
try:
if _what[host] - 1 < 0:
# This should never happen, but let's be safe
raise KeyError("Don't be so negative")
_what[host] -= 1
except KeyError:
_what[host] = 0
def summarize(self, host):
""" return information about a particular host """
return dict(
ok=self.ok.get(host, 0),
failures=self.failures.get(host, 0),
unreachable=self.dark.get(host, 0),
changed=self.changed.get(host, 0),
skipped=self.skipped.get(host, 0),
rescued=self.rescued.get(host, 0),
ignored=self.ignored.get(host, 0),
)
def set_custom_stats(self, which, what, host=None):
""" allow setting of a custom stat"""
if host is None:
host = '_run'
if host not in self.custom:
self.custom[host] = {which: what}
else:
self.custom[host][which] = what
def update_custom_stats(self, which, what, host=None):
""" allow aggregation of a custom stat"""
if host is None:
host = '_run'
if host not in self.custom or which not in self.custom[host]:
return self.set_custom_stats(which, what, host)
# mismatching types
if not isinstance(what, type(self.custom[host][which])):
return None
if isinstance(what, MutableMapping):
self.custom[host][which] = merge_hash(self.custom[host][which], what)
else:
# let overloaded + take care of other types
self.custom[host][which] += what
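# --- Editor's sketch, not upstream code: typical AggregateStats usage. ---
# Callers bump counters per task result and read summarize() per host at the
# end of a run; update_custom_stats() aggregates scalars and merges mappings.
if __name__ == '__main__':
    stats = AggregateStats()
    stats.increment('ok', 'web01')
    stats.increment('changed', 'web01')
    stats.increment('failures', 'db01')
    stats.update_custom_stats('slow_tasks', 1, host='web01')
    stats.update_custom_stats('slow_tasks', 2, host='web01')  # 1 + 2 -> 3
    print(stats.summarize('web01'))
    # {'ok': 1, 'failures': 0, 'unreachable': 0, 'changed': 1, 'skipped': 0,
    #  'rescued': 0, 'ignored': 0}
    print(stats.custom['web01']['slow_tasks'])  # 3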
| 3,180 | Python | .py | 79 | 32.227848 | 81 | 0.618105 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,340 | interpreter_discovery.py | ansible_ansible/lib/ansible/executor/interpreter_discovery.py |
# Copyright: (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import bisect
import json
import pkgutil
import re
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.distro import LinuxDistribution
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.facts.system.distribution import Distribution
from traceback import format_exc
OS_FAMILY_LOWER = {k.lower(): v.lower() for k, v in Distribution.OS_FAMILY.items()}
display = Display()
foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND')
class InterpreterDiscoveryRequiredError(Exception):
    def __init__(self, message, interpreter_name, discovery_mode):
        super(InterpreterDiscoveryRequiredError, self).__init__(message)
        # Exception has no .message attribute in Python 3; store it so the
        # __str__/__repr__ implementations below do not raise AttributeError
        self.message = message
        self.interpreter_name = interpreter_name
        self.discovery_mode = discovery_mode
def __str__(self):
return self.message
def __repr__(self):
# TODO: proper repr impl
return self.message
def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
# interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
# get the system type from uname, and find any random Python that can get us the info we need. For supported
# target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
# and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
# distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
# default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
# FUTURE: add logical equivalence for "python3" in the case of py3-only modules?
if interpreter_name != 'python':
raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name))
host = task_vars.get('inventory_hostname', 'unknown')
res = None
platform_type = 'unknown'
found_interpreters = [u'/usr/bin/python3'] # fallback value
is_auto_legacy = discovery_mode.startswith('auto_legacy')
is_silent = discovery_mode.endswith('_silent')
try:
platform_python_map = C.config.get_config_value('_INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars)
bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars)
display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host)
# not all command -v impls accept a list of commands, so we have to call it once per python
command_list = ["command -v '%s'" % py for py in bootstrap_python_list]
shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
# FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do?
res = action._low_level_execute_command(shell_bootstrap, sudoable=False)
raw_stdout = res.get('stdout', u'')
match = foundre.match(raw_stdout)
if not match:
display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host)
raise ValueError('unexpected output from Python interpreter discovery')
platform_type = match.groups()[0].lower().strip()
found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')]
display.debug(u"found interpreters: {0}".format(found_interpreters), host=host)
if not found_interpreters:
if not is_silent:
action._discovery_warnings.append(u'No python interpreters found for '
u'host {0} (tried {1})'.format(host, bootstrap_python_list))
# this is lame, but returning None or throwing an exception is uglier
return u'/usr/bin/python3'
if platform_type != 'linux':
raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py')
# FUTURE: respect pipelining setting instead of just if the connection supports it?
if action._connection.has_pipelining:
res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script)
else:
# FUTURE: implement on-disk case (via script action or ?)
raise NotImplementedError('pipelining support required for extended interpreter discovery')
platform_info = json.loads(res.get('stdout'))
distro, version = _get_linux_distro(platform_info)
if not distro or not version:
raise NotImplementedError('unable to get Linux distribution/version info')
family = OS_FAMILY_LOWER.get(distro.lower().strip())
version_map = platform_python_map.get(distro.lower().strip()) or platform_python_map.get(family)
if not version_map:
raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro))
platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict')
# provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been)
if is_auto_legacy:
if platform_interpreter != u'/usr/bin/python3' and u'/usr/bin/python3' in found_interpreters:
if not is_silent:
action._discovery_warnings.append(
u"Distribution {0} {1} on host {2} should use {3}, but is using "
u"/usr/bin/python3 for backward compatibility with prior Ansible releases. "
u"See {4} for more information"
.format(distro, version, host, platform_interpreter,
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return u'/usr/bin/python3'
if platform_interpreter not in found_interpreters:
if platform_interpreter not in bootstrap_python_list:
# sanity check to make sure we looked for it
if not is_silent:
action._discovery_warnings \
.append(u"Platform interpreter {0} on host {1} is missing from bootstrap list"
.format(platform_interpreter, host))
if not is_silent:
action._discovery_warnings \
.append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the "
u"discovered platform python interpreter was not present. See {5} "
u"for more information."
.format(distro, version, host, platform_interpreter, found_interpreters[0],
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return found_interpreters[0]
return platform_interpreter
except NotImplementedError as ex:
display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host)
except AnsibleError:
raise
except Exception as ex:
if not is_silent:
display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex)))
display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host)
if res and res.get('stderr'):
display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host)
if not is_silent:
action._discovery_warnings \
.append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of "
u"another Python interpreter could change the meaning of that path. See {3} "
u"for more information."
.format(platform_type, host, found_interpreters[0],
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return found_interpreters[0]
def _get_linux_distro(platform_info):
dist_result = platform_info.get('platform_dist_result', [])
if len(dist_result) == 3 and any(dist_result):
return dist_result[0], dist_result[1]
osrelease_content = platform_info.get('osrelease_content')
if not osrelease_content:
return u'', u''
osr = LinuxDistribution._parse_os_release_content(osrelease_content)
return osr.get('id', u''), osr.get('version_id', u'')
def _version_fuzzy_match(version, version_map):
# try exact match first
res = version_map.get(version)
if res:
return res
sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()])
find_looseversion = LooseVersion(version)
# slot match; return nearest previous version we're newer than
kpos = bisect.bisect(sorted_looseversions, find_looseversion)
if kpos == 0:
# older than everything in the list, return the oldest version
# TODO: warning-worthy?
return version_map.get(sorted_looseversions[0].vstring)
# TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)?
# return the next-oldest entry that we're newer than...
return version_map.get(sorted_looseversions[kpos - 1].vstring)
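# --- Editor's sketch, not upstream code: how _version_fuzzy_match resolves. ---
# bisect locates the slot for the requested version; the entry for the nearest
# version we are newer than wins, and anything older than every key falls back
# to the oldest entry. The map below is illustrative only.
if __name__ == '__main__':
    version_map = {'6': '/usr/bin/python', '8': '/usr/libexec/platform-python'}
    print(_version_fuzzy_match('8', version_map))    # exact match
    print(_version_fuzzy_match('7.9', version_map))  # slots after '6' -> its entry
    print(_version_fuzzy_match('5', version_map))    # older than all -> oldest entry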
| 9,975 | Python | .py | 156 | 53.724359 | 127 | 0.672776 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,341 | python_target.py | ansible_ansible/lib/ansible/executor/discovery/python_target.py |
# Copyright: (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# FUTURE: this could be swapped out for our bundled version of distro to move more complete platform
# logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly
from __future__ import annotations
import json
import platform
import io
import os
def read_utf8_file(path, encoding='utf-8'):
if not os.access(path, os.R_OK):
return None
with io.open(path, 'r', encoding=encoding) as fd:
content = fd.read()
return content
def get_platform_info():
result = dict(platform_dist_result=[])
if hasattr(platform, 'dist'):
result['platform_dist_result'] = platform.dist()
osrelease_content = read_utf8_file('/etc/os-release')
# try to fall back to /usr/lib/os-release
if not osrelease_content:
osrelease_content = read_utf8_file('/usr/lib/os-release')
result['osrelease_content'] = osrelease_content
return result
def main():
info = get_platform_info()
print(json.dumps(info))
if __name__ == '__main__':
main()
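# --- Editor's note, not upstream code: the script emits one JSON object on
# stdout, roughly (values illustrative):
#
#     {"platform_dist_result": [],
#      "osrelease_content": "NAME=\"Ubuntu\"\nVERSION_ID=\"22.04\"\n..."}
#
# which discover_interpreter() parses to pick the distro-specific Python path.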
| 1,181 | Python | .py | 30 | 35 | 108 | 0.70194 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,342 | worker.py | ansible_ansible/lib/ansible/executor/process/worker.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import sys
import traceback
from jinja2.exceptions import TemplateNotFound
from multiprocessing.queues import Queue
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.executor.task_executor import TaskExecutor
from ansible.module_utils.common.text.converters import to_text
from ansible.utils.display import Display
from ansible.utils.multiprocessing import context as multiprocessing_context
__all__ = ['WorkerProcess']
display = Display()
current_worker = None
class WorkerQueue(Queue):
"""Queue that raises AnsibleError items on get()."""
def get(self, *args, **kwargs):
result = super(WorkerQueue, self).get(*args, **kwargs)
if isinstance(result, AnsibleError):
raise result
return result
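# --- Editor's sketch, not upstream code: the WorkerQueue failure pattern. ---
# Putting an exception instance on the queue makes the consumer re-raise it at
# get() time, so a worker-side failure surfaces naturally in the parent:
#
#     q = WorkerQueue(ctx=multiprocessing_context)
#     q.put(AnsibleError('boom'))
#     q.get()  # raises AnsibleError: boom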
class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defined]
"""
The worker thread class, which uses TaskExecutor to run tasks
read from a job queue and pushes results into a results queue
for reading later.
"""
def __init__(self, final_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj, worker_id):
super(WorkerProcess, self).__init__()
        # job details handed off by the task queue manager:
self._final_q = final_q
self._task_vars = task_vars
self._host = host
self._task = task
self._play_context = play_context
self._loader = loader
self._variable_manager = variable_manager
self._shared_loader_obj = shared_loader_obj
# NOTE: this works due to fork, if switching to threads this should change to per thread storage of temp files
# clear var to ensure we only delete files for this child
self._loader._tempfiles = set()
self.worker_queue = WorkerQueue(ctx=multiprocessing_context)
self.worker_id = worker_id
def _save_stdin(self):
self._new_stdin = None
try:
if sys.stdin.isatty() and sys.stdin.fileno() is not None:
try:
self._new_stdin = os.fdopen(os.dup(sys.stdin.fileno()))
except OSError:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor
pass
except (AttributeError, ValueError):
# couldn't get stdin's fileno
pass
if self._new_stdin is None:
self._new_stdin = open(os.devnull)
def start(self):
"""
multiprocessing.Process replaces the worker's stdin with a new file
but we wish to preserve it if it is connected to a terminal.
Therefore dup a copy prior to calling the real start(),
ensuring the descriptor is preserved somewhere in the new child, and
make sure it is closed in the parent when start() completes.
"""
self._save_stdin()
# FUTURE: this lock can be removed once a more generalized pre-fork thread pause is in place
with display._lock:
try:
return super(WorkerProcess, self).start()
finally:
self._new_stdin.close()
def _hard_exit(self, e):
"""
There is no safe exception to return to higher level code that does not
risk an innocent try/except finding itself executing in the wrong
process. All code executing above WorkerProcess.run() on the stack
conceptually belongs to another program.
"""
try:
display.debug(u"WORKER HARD EXIT: %s" % to_text(e))
except BaseException:
# If the cause of the fault is IOError being generated by stdio,
# attempting to log a debug message may trigger another IOError.
# Try printing once then give up.
pass
os._exit(1)
def run(self):
"""
Wrap _run() to ensure no possibility an errant exception can cause
control to return to the StrategyBase task loop, or any other code
higher in the stack.
As multiprocessing in Python 2.x provides no protection, it is possible
a try/except added in far-away code can cause a crashed child process
to suddenly assume the role and prior state of its parent.
"""
try:
return self._run()
except BaseException as e:
self._hard_exit(e)
finally:
# This is a hack, pure and simple, to work around a potential deadlock
# in ``multiprocessing.Process`` when flushing stdout/stderr during process
# shutdown.
#
# We should no longer have a problem with ``Display``, as it now proxies over
# the queue from a fork. However, to avoid any issues with plugins that may
# be doing their own printing, this has been kept.
#
# This happens at the very end to avoid that deadlock, by simply side
# stepping it. This should not be treated as a long term fix.
#
# TODO: Evaluate migrating away from the ``fork`` multiprocessing start method.
sys.stdout = sys.stderr = open(os.devnull, 'w')
def _run(self):
"""
Called when the process is started. Pushes the result onto the
results queue. We also remove the host from the blocked hosts list, to
signify that they are ready for their next task.
"""
# import cProfile, pstats, StringIO
# pr = cProfile.Profile()
# pr.enable()
# Set the queue on Display so calls to Display.display are proxied over the queue
display.set_queue(self._final_q)
global current_worker
current_worker = self
try:
# execute the task and build a TaskResult from the result
display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
executor_result = TaskExecutor(
self._host,
self._task,
self._task_vars,
self._play_context,
self._new_stdin,
self._loader,
self._shared_loader_obj,
self._final_q,
self._variable_manager,
).run()
display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
self._host.vars = dict()
self._host.groups = []
# put the result on the result queue
display.debug("sending task result for task %s" % self._task._uuid)
try:
self._final_q.send_task_result(
self._host.name,
self._task._uuid,
executor_result,
task_fields=self._task.dump_attrs(),
)
except Exception as e:
display.debug(f'failed to send task result ({e}), sending surrogate result')
self._final_q.send_task_result(
self._host.name,
self._task._uuid,
# Overriding the task result, to represent the failure
{
'failed': True,
'msg': f'{e}',
'exception': traceback.format_exc(),
},
# The failure pickling may have been caused by the task attrs, omit for safety
{},
)
display.debug("done sending task result for task %s" % self._task._uuid)
except AnsibleConnectionFailure:
self._host.vars = dict()
self._host.groups = []
self._final_q.send_task_result(
self._host.name,
self._task._uuid,
dict(unreachable=True),
task_fields=self._task.dump_attrs(),
)
except Exception as e:
if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
try:
self._host.vars = dict()
self._host.groups = []
self._final_q.send_task_result(
self._host.name,
self._task._uuid,
dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
task_fields=self._task.dump_attrs(),
)
except Exception:
display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
finally:
self._clean_up()
display.debug("WORKER PROCESS EXITING")
# pr.disable()
# s = StringIO.StringIO()
# sortby = 'time'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# with open('worker_%06d.stats' % os.getpid(), 'w') as f:
# f.write(s.getvalue())
def _clean_up(self):
# NOTE: see note in init about forks
# ensure we cleanup all temp files for this worker
self._loader.cleanup_all_tmp_files()
| 10,041 | Python | .py | 222 | 34.085586 | 125 | 0.596238 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,343 | __init__.py | ansible_ansible/lib/ansible/executor/process/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
| 749 | Python | .py | 17 | 43 | 70 | 0.77565 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,344 | module_manifest.py | ansible_ansible/lib/ansible/executor/powershell/module_manifest.py |
# (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import base64
import errno
import json
import os
import pkgutil
import secrets
import re
from importlib import import_module
from ansible.module_utils.compat.version import LooseVersion
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.loader import ps_module_utils_loader
from ansible.utils.collection_loader import resource_from_fqcr
class PSModuleDepFinder(object):
def __init__(self):
# This is also used by validate-modules to get a module's required utils in base and a collection.
self.ps_modules = dict()
self.exec_scripts = dict()
# by defining an explicit dict of cs utils and where they are used, we
# can potentially save time by not adding the type multiple times if it
# isn't needed
self.cs_utils_wrapper = dict()
self.cs_utils_module = dict()
self.ps_version = None
self.os_version = None
self.become = False
self._re_cs_module = [
# Reference C# module_util in another C# util, this must always be the fully qualified name.
# 'using ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
re.compile(to_bytes(r'(?i)^using\s((Ansible\..+)|'
r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+));\s*$')),
]
self._re_cs_in_ps_module = [
# Reference C# module_util in a PowerShell module
# '#AnsibleRequires -CSharpUtil Ansible.{name}'
# '#AnsibleRequires -CSharpUtil ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
# '#AnsibleRequires -CSharpUtil ..module_utils.{name}'
# Can have '-Optional' at the end to denote the util is optional
re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((Ansible\.[\w\.]+)|'
r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
r'(\.[\w\.]+))(?P<optional>\s+-Optional){0,1}')),
]
self._re_ps_module = [
# Original way of referencing a builtin module_util
# '#Requires -Module Ansible.ModuleUtils.{name}
re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)')),
# New way of referencing a builtin and collection module_util
# '#AnsibleRequires -PowerShell Ansible.ModuleUtils.{name}'
# '#AnsibleRequires -PowerShell ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
# '#AnsibleRequires -PowerShell ..module_utils.{name}'
# Can have '-Optional' at the end to denote the util is optional
re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-powershell\s+((Ansible\.ModuleUtils\.[\w\.]+)|'
r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
r'(\.[\w\.]+))(?P<optional>\s+-Optional){0,1}')),
]
self._re_wrapper = re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-wrapper\s+(\w*)'))
self._re_ps_version = re.compile(to_bytes(r'(?i)^#requires\s+\-version\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_os_version = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-osversion\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_become = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-become$'))
def scan_module(self, module_data, fqn=None, wrapper=False, powershell=True):
lines = module_data.split(b'\n')
module_utils = set()
if wrapper:
cs_utils = self.cs_utils_wrapper
else:
cs_utils = self.cs_utils_module
if powershell:
checks = [
# PS module contains '#Requires -Module Ansible.ModuleUtils.*'
# PS module contains '#AnsibleRequires -Powershell Ansible.*' (or collections module_utils ref)
(self._re_ps_module, self.ps_modules, ".psm1"),
# PS module contains '#AnsibleRequires -CSharpUtil Ansible.*' (or collections module_utils ref)
(self._re_cs_in_ps_module, cs_utils, ".cs"),
]
else:
checks = [
# CS module contains 'using Ansible.*;' or 'using ansible_collections.ns.coll.plugins.module_utils.*;'
(self._re_cs_module, cs_utils, ".cs"),
]
for line in lines:
for check in checks:
for pattern in check[0]:
match = pattern.match(line)
if match:
# tolerate windows line endings by stripping any remaining
# newline chars
module_util_name = to_text(match.group(1).rstrip())
match_dict = match.groupdict()
optional = match_dict.get('optional', None) is not None
if module_util_name not in check[1].keys():
module_utils.add((module_util_name, check[2], fqn, optional))
break
if powershell:
ps_version_match = self._re_ps_version.match(line)
if ps_version_match:
self._parse_version_match(ps_version_match, "ps_version")
os_version_match = self._re_os_version.match(line)
if os_version_match:
self._parse_version_match(os_version_match, "os_version")
# once become is set, no need to keep on checking recursively
if not self.become:
become_match = self._re_become.match(line)
if become_match:
self.become = True
if wrapper:
wrapper_match = self._re_wrapper.match(line)
if wrapper_match:
self.scan_exec_script(wrapper_match.group(1).rstrip())
# recursively drill into each Requires to see if there are any more
# requirements
for m in set(module_utils):
self._add_module(*m, wrapper=wrapper)
def scan_exec_script(self, name):
# scans lib/ansible/executor/powershell for scripts used in the module
# exec side. It also scans these scripts for any dependencies
name = to_text(name)
if name in self.exec_scripts.keys():
return
data = pkgutil.get_data("ansible.executor.powershell", to_native(name + ".ps1"))
if data is None:
raise AnsibleError("Could not find executor powershell script "
"for '%s'" % name)
b_data = to_bytes(data)
# remove comments to reduce the payload size in the exec wrappers
if C.DEFAULT_DEBUG:
exec_script = b_data
else:
exec_script = _strip_comments(b_data)
self.exec_scripts[name] = to_bytes(exec_script)
self.scan_module(b_data, wrapper=True, powershell=True)
def _add_module(self, name, ext, fqn, optional, wrapper=False):
m = to_text(name)
util_fqn = None
if m.startswith("Ansible."):
# Builtin util, use plugin loader to get the data
mu_path = ps_module_utils_loader.find_plugin(m, ext)
if not mu_path:
if optional:
return
raise AnsibleError('Could not find imported module support code '
'for \'%s\'' % m)
module_util_data = to_bytes(_slurp(mu_path))
else:
# Collection util, load the package data based on the util import.
submodules = m.split(".")
if m.startswith('.'):
fqn_submodules = fqn.split('.')
for submodule in submodules:
if submodule:
break
del fqn_submodules[-1]
submodules = fqn_submodules + [s for s in submodules if s]
n_package_name = to_native('.'.join(submodules[:-1]), errors='surrogate_or_strict')
n_resource_name = to_native(submodules[-1] + ext, errors='surrogate_or_strict')
try:
module_util = import_module(n_package_name)
pkg_data = pkgutil.get_data(n_package_name, n_resource_name)
if pkg_data is None:
raise ImportError("No package data found")
module_util_data = to_bytes(pkg_data, errors='surrogate_or_strict')
util_fqn = to_text("%s.%s " % (n_package_name, submodules[-1]), errors='surrogate_or_strict')
# Get the path of the util which is required for coverage collection.
resource_paths = list(module_util.__path__)
if len(resource_paths) != 1:
# This should never happen with a collection but we are just being defensive about it.
raise AnsibleError("Internal error: Referenced module_util package '%s' contains 0 or multiple "
"import locations when we only expect 1." % n_package_name)
mu_path = os.path.join(resource_paths[0], n_resource_name)
except (ImportError, OSError) as err:
if getattr(err, "errno", errno.ENOENT) == errno.ENOENT:
if optional:
return
raise AnsibleError('Could not find collection imported module support code for \'%s\''
% to_native(m))
else:
raise
util_info = {
'data': module_util_data,
'path': to_text(mu_path),
}
if ext == ".psm1":
self.ps_modules[m] = util_info
else:
if wrapper:
self.cs_utils_wrapper[m] = util_info
else:
self.cs_utils_module[m] = util_info
self.scan_module(module_util_data, fqn=util_fqn, wrapper=wrapper, powershell=(ext == ".psm1"))
def _parse_version_match(self, match, attribute):
new_version = to_text(match.group(1)).rstrip()
# PowerShell cannot cast a string of "1" to Version, it must have at
# least the major.minor for it to be valid so we append 0
if match.group(2) is None:
new_version = "%s.0" % new_version
existing_version = getattr(self, attribute, None)
if existing_version is None:
setattr(self, attribute, new_version)
else:
# determine which is the latest version and set that
if LooseVersion(new_version) > LooseVersion(existing_version):
setattr(self, attribute, new_version)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s"
% os.path.abspath(path))
with open(path, 'rb') as fd:
data = fd.read()
return data
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
start_block = False
for line in source.splitlines():
l = line.strip()
if start_block and l.endswith(b'#>'):
start_block = False
continue
elif start_block:
continue
elif l.startswith(b'<#'):
start_block = True
continue
elif not l or l.startswith(b'#'):
continue
buf.append(line)
return b'\n'.join(buf)
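# --- Editor's sketch, not upstream code: what _strip_comments keeps/drops. ---
# Full-line '#' comments, '<# ... #>' block comments and blank lines are
# removed; code lines (even with trailing inline comments) pass through:
#
#     _strip_comments(b'<#\nheader\n#>\n$x = 1  # kept inline\n# dropped\n')
#     # -> b'$x = 1  # kept inline'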
def _create_powershell_wrapper(b_module_data, module_path, module_args,
environment, async_timeout, become,
become_method, become_user, become_password,
become_flags, substyle, task_vars, module_fqn):
# creates the manifest/wrapper used in PowerShell/C# modules to enable
# things like become and async - this is also called in action/script.py
# FUTURE: add process_wrapper.ps1 to run module_wrapper in a new process
# if running under a persistent connection and substyle is C# so we
# don't have type conflicts
finder = PSModuleDepFinder()
if substyle != 'script':
# don't scan the module for util dependencies and other Ansible related
# flags if the substyle is 'script' which is set by action/script
finder.scan_module(b_module_data, fqn=module_fqn, powershell=(substyle == "powershell"))
module_wrapper = "module_%s_wrapper" % substyle
exec_manifest = dict(
module_entry=to_text(base64.b64encode(b_module_data)),
powershell_modules=dict(),
csharp_utils=dict(),
csharp_utils_module=list(), # csharp_utils only required by a module
module_args=module_args,
actions=[module_wrapper],
environment=environment,
encoded_output=False,
)
finder.scan_exec_script(module_wrapper)
if async_timeout > 0:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('async_watchdog')
finder.scan_exec_script('async_wrapper')
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["actions"].insert(0, 'async_wrapper')
exec_manifest["async_jid"] = f'j{secrets.randbelow(999999999999)}'
exec_manifest["async_timeout_sec"] = async_timeout
exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars)
if become and resource_from_fqcr(become_method) == 'runas': # runas and namespace.collection.runas
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest["actions"].insert(0, 'become_wrapper')
exec_manifest["become_user"] = become_user
exec_manifest["become_password"] = become_password
exec_manifest['become_flags'] = become_flags
exec_manifest['min_ps_version'] = finder.ps_version
exec_manifest['min_os_version'] = finder.os_version
if finder.become and 'become_wrapper' not in exec_manifest['actions']:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest['actions'].insert(0, 'become_wrapper')
exec_manifest['become_user'] = 'SYSTEM'
exec_manifest['become_password'] = None
exec_manifest['become_flags'] = None
coverage_manifest = dict(
module_path=module_path,
module_util_paths=dict(),
output=None,
)
coverage_output = C.config.get_config_value('COVERAGE_REMOTE_OUTPUT', variables=task_vars)
if coverage_output and substyle == 'powershell':
finder.scan_exec_script('coverage_wrapper')
coverage_manifest['output'] = coverage_output
coverage_enabled = C.config.get_config_value('COVERAGE_REMOTE_PATHS', variables=task_vars)
coverage_manifest['path_filter'] = coverage_enabled
# make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
finder._add_module(b"Ansible.ModuleUtils.AddType", ".psm1", None, False,
wrapper=False)
    # exec_wrapper only needs to ship in the payload when become or async is
    # used. To save payload space, note whether it was already required before
    # scanning it (its contents are needed below either way), and drop it from
    # the payload afterwards if it was not.
exec_required = "exec_wrapper" in finder.exec_scripts.keys()
finder.scan_exec_script("exec_wrapper")
# must contain an empty newline so it runs the begin/process/end block
finder.exec_scripts["exec_wrapper"] += b"\n\n"
exec_wrapper = finder.exec_scripts["exec_wrapper"]
if not exec_required:
finder.exec_scripts.pop("exec_wrapper")
for name, data in finder.exec_scripts.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest[name] = b64_data
for name, data in finder.ps_modules.items():
b64_data = to_text(base64.b64encode(data['data']))
exec_manifest['powershell_modules'][name] = b64_data
coverage_manifest['module_util_paths'][name] = data['path']
cs_utils = {}
for cs_util in [finder.cs_utils_wrapper, finder.cs_utils_module]:
for name, data in cs_util.items():
cs_utils[name] = data['data']
for name, data in cs_utils.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest['csharp_utils'][name] = b64_data
exec_manifest['csharp_utils_module'] = list(finder.cs_utils_module.keys())
# To save on the data we are sending across we only add the coverage info if coverage is being run
if 'coverage_wrapper' in exec_manifest:
exec_manifest['coverage'] = coverage_manifest
b_json = to_bytes(json.dumps(exec_manifest))
# delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
b_data = exec_wrapper + b'\0\0\0\0' + b_json
return b_data
| 17,365 | Python | .py | 328 | 41.176829 | 124 | 0.60224 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,345 | template.py | ansible_ansible/lib/ansible/template/template.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from jinja2.nativetypes import NativeTemplate
__all__ = ['AnsibleJ2Template']
class AnsibleJ2Template(NativeTemplate):
"""
A helper class, which prevents Jinja2 from running AnsibleJ2Vars through dict().
Without this, {% include %} and similar will create new contexts unlike the special
one created in Templar.template. This ensures they are all alike, except for
potential locals.
"""
def new_context(self, vars=None, shared=False, locals=None):
if vars is None:
vars = dict(self.globals or ())
if isinstance(vars, dict):
vars = vars.copy()
if locals is not None:
vars.update(locals)
else:
vars = vars.add_locals(locals)
return self.environment.context_class(self.environment, vars, self.name, self.blocks)
| 1,583 | Python | .py | 36 | 39.333333 | 93 | 0.72013 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,346 | native_helpers.py | ansible_ansible/lib/ansible/template/native_helpers.py |
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import ast
from itertools import islice, chain
from types import GeneratorType
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.native_jinja import NativeJinjaText
_JSON_MAP = {
"true": True,
"false": False,
"null": None,
}
class Json2Python(ast.NodeTransformer):
def visit_Name(self, node):
if node.id not in _JSON_MAP:
return node
return ast.Constant(value=_JSON_MAP[node.id])
def ansible_eval_concat(nodes):
"""Return a string of concatenated compiled nodes. Throw an undefined error
if any of the nodes is undefined.
If the result of concat appears to be a dictionary, list or bool,
try and convert it to such using literal_eval, the same mechanism as used
in jinja2_native.
Used in Templar.template() when jinja2_native=False and convert_data=True.
"""
head = list(islice(nodes, 2))
if not head:
return ''
if len(head) == 1:
out = head[0]
if isinstance(out, NativeJinjaText):
return out
out = to_text(out)
else:
if isinstance(nodes, GeneratorType):
nodes = chain(head, nodes)
out = ''.join([to_text(v) for v in nodes])
# if this looks like a dictionary, list or bool, convert it to such
if out.startswith(('{', '[')) or out in ('True', 'False'):
try:
out = ast.literal_eval(
ast.fix_missing_locations(
Json2Python().visit(
ast.parse(out, mode='eval')
)
)
)
except (TypeError, ValueError, SyntaxError, MemoryError):
pass
return out
def ansible_concat(nodes):
"""Return a string of concatenated compiled nodes. Throw an undefined error
if any of the nodes is undefined. Other than that it is equivalent to
Jinja2's default concat function.
Used in Templar.template() when jinja2_native=False and convert_data=False.
"""
return ''.join([to_text(v) for v in nodes])
def ansible_native_concat(nodes):
"""Return a native Python type from the list of compiled nodes. If the
result is a single node, its value is returned. Otherwise, the nodes are
concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
string is returned.
https://github.com/pallets/jinja/blob/master/src/jinja2/nativetypes.py
"""
head = list(islice(nodes, 2))
if not head:
return None
if len(head) == 1:
out = head[0]
# TODO send unvaulted data to literal_eval?
if isinstance(out, AnsibleVaultEncryptedUnicode):
return out.data
if isinstance(out, NativeJinjaText):
# Sometimes (e.g. ``| string``) we need to mark variables
# in a special way so that they remain strings and are not
# passed into literal_eval.
# See:
# https://github.com/ansible/ansible/issues/70831
# https://github.com/pallets/jinja/issues/1200
# https://github.com/ansible/ansible/issues/70831#issuecomment-664190894
return out
# short-circuit literal_eval for anything other than strings
if not isinstance(out, string_types):
return out
else:
if isinstance(nodes, GeneratorType):
nodes = chain(head, nodes)
out = ''.join([to_text(v) for v in nodes])
try:
evaled = ast.literal_eval(
# In Python 3.10+ ast.literal_eval removes leading spaces/tabs
# from the given string. For backwards compatibility we need to
# parse the string ourselves without removing leading spaces/tabs.
ast.parse(out, mode='eval')
)
except (TypeError, ValueError, SyntaxError, MemoryError):
return out
if isinstance(evaled, string_types):
quote = out[0]
return f'{quote}{evaled}{quote}'
return evaled
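# --- Editor's sketch, not upstream code: how the concat variants differ. ---
# literal_eval is what lets "native" templating hand back real Python types
# instead of their string renderings.
if __name__ == '__main__':
    print(repr(ansible_native_concat(['[1, ', '2]'])))     # [1, 2] (a real list)
    print(repr(ansible_eval_concat(['{"a": ', 'true}'])))  # {'a': True}
    print(repr(ansible_concat(['[1, ', '2]'])))            # '[1, 2]' (still a str)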
| 4,333 | Python | .py | 105 | 33.333333 | 92 | 0.651335 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,347 | vars.py | ansible_ansible/lib/ansible/template/vars.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from collections import ChainMap
from jinja2.utils import missing
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.common.text.converters import to_native
__all__ = ['AnsibleJ2Vars']
def _process_locals(_l):
if _l is None:
return {}
return {
k: v for k, v in _l.items()
if v is not missing
and k not in {'context', 'environment', 'template'} # NOTE is this really needed?
}
class AnsibleJ2Vars(ChainMap):
"""Helper variable storage class that allows for nested variables templating: `foo: "{{ bar }}"`."""
def __init__(self, templar, globals, locals=None):
self._templar = templar
super().__init__(
_process_locals(locals), # first mapping has the highest precedence
self._templar.available_variables,
globals,
)
def __getitem__(self, varname):
variable = super().__getitem__(varname)
from ansible.vars.hostvars import HostVars
if (varname == "vars" and isinstance(variable, dict)) or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
return variable
try:
return self._templar.template(variable)
except AnsibleUndefinedVariable as e:
# Instead of failing here prematurely, return an Undefined
# object which fails only after its first usage allowing us to
# do lazy evaluation and passing it into filters/tests that
# operate on such objects.
return self._templar.environment.undefined(
hint=f"{variable}: {e.message}",
name=varname,
exc=AnsibleUndefinedVariable,
)
except Exception as e:
msg = getattr(e, 'message', None) or to_native(e)
raise AnsibleError(
f"An unhandled exception occurred while templating '{to_native(variable)}'. "
f"Error was a {type(e)}, original message: {msg}"
)
def add_locals(self, locals):
"""If locals are provided, create a copy of self containing those
locals in addition to what is already in this variable proxy.
"""
if locals is None:
return self
current_locals = self.maps[0]
current_globals = self.maps[2]
# prior to version 2.9, locals contained all of the vars and not just the current
# local vars so this was not necessary for locals to propagate down to nested includes
new_locals = current_locals | locals
return AnsibleJ2Vars(self._templar, current_globals, locals=new_locals)
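# --- Editor's sketch, not upstream code: why the ChainMap ordering matters. ---
# The first mapping wins on key collisions, giving the
# locals > available_variables > globals precedence set up in __init__:
if __name__ == '__main__':
    layers = ChainMap({'x': 'local'}, {'x': 'hostvar', 'y': 'hostvar'}, {'z': 'global'})
    print(layers['x'])  # local    (first mapping shadows the rest)
    print(layers['y'])  # hostvar
    print(layers['z'])  # global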
| 2,854 | Python | .py | 60 | 38.416667 | 131 | 0.6435 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,348 | __init__.py | ansible_ansible/lib/ansible/template/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import ast
import datetime
import functools
import os
import pwd
import re
import time
from collections.abc import Iterator, Sequence, Mapping, MappingView, MutableMapping
from contextlib import contextmanager
from numbers import Number
from traceback import format_exc
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.loaders import FileSystemLoader
from jinja2.nativetypes import NativeEnvironment
from jinja2.runtime import Context, StrictUndefined
from ansible import constants as C
from ansible.errors import (
AnsibleAssertionError,
AnsibleError,
AnsibleFilterError,
AnsibleLookupError,
AnsibleOptionsError,
AnsibleUndefinedVariable,
)
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
from ansible.module_utils.common.collections import is_sequence
from ansible.plugins.loader import filter_loader, lookup_loader, test_loader
from ansible.template.native_helpers import ansible_native_concat, ansible_eval_concat, ansible_concat
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.display import Display
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.native_jinja import NativeJinjaText
from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var
display = Display()
__all__ = ['Templar', 'generate_ansible_template_vars']
# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = (bool, Number)
JINJA2_OVERRIDE = '#jinja2:'
JINJA2_BEGIN_TOKENS = frozenset(('variable_begin', 'block_begin', 'comment_begin', 'raw_begin'))
JINJA2_END_TOKENS = frozenset(('variable_end', 'block_end', 'comment_end', 'raw_end'))
RANGE_TYPE = type(range(0))
def generate_ansible_template_vars(path, fullpath=None, dest_path=None):
if fullpath is None:
b_path = to_bytes(path)
else:
b_path = to_bytes(fullpath)
try:
template_uid = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
except (KeyError, TypeError):
template_uid = os.stat(b_path).st_uid
temp_vars = {
'template_host': to_unsafe_text(os.uname()[1]),
'template_path': to_unsafe_text(path),
'template_mtime': datetime.datetime.fromtimestamp(os.path.getmtime(b_path)),
'template_uid': to_unsafe_text(template_uid),
'template_run_date': datetime.datetime.now(),
'template_destpath': wrap_var(to_native(dest_path)) if dest_path else None,
}
if fullpath is None:
temp_vars['template_fullpath'] = wrap_var(os.path.abspath(path))
else:
temp_vars['template_fullpath'] = wrap_var(fullpath)
managed_default = C.DEFAULT_MANAGED_STR
managed_str = managed_default.format(
host="{{ template_host }}",
uid="{{ template_uid }}",
file="{{ template_path }}"
)
temp_vars['ansible_managed'] = time.strftime(to_native(managed_str), time.localtime(os.path.getmtime(b_path)))
return temp_vars
def _escape_backslashes(data, jinja_env):
"""Double backslashes within jinja2 expressions
A user may enter something like this in a playbook::
debug:
msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
    The string inside of the {{ }} gets interpreted multiple times: first by
    yaml, then by python, and finally by jinja2 as part of its variable. Because
it is processed by both python and jinja2, the backslash escaped
characters get unescaped twice. This means that we'd normally have to use
four backslashes to escape that. This is painful for playbook authors as
they have to remember different rules for inside vs outside of a jinja2
expression (The backslashes outside of the "{{ }}" only get processed by
yaml and python. So they only need to be escaped once). The following
code fixes this by automatically performing the extra quoting of
backslashes inside of a jinja2 expression.
"""
if '\\' in data and jinja_env.variable_start_string in data:
new_data = []
d2 = jinja_env.preprocess(data)
in_var = False
for token in jinja_env.lex(d2):
if token[1] == 'variable_begin':
in_var = True
new_data.append(token[2])
elif token[1] == 'variable_end':
in_var = False
new_data.append(token[2])
elif in_var and token[1] == 'string':
# Double backslashes only if we're inside of a jinja2 variable
new_data.append(token[2].replace('\\', '\\\\'))
else:
new_data.append(token[2])
data = ''.join(new_data)
return data
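# --- Editor's sketch, not upstream code: effect on the docstring's example. ---
# Only 'string' tokens inside {{ }} get their backslashes doubled; text
# outside expressions is untouched:
#
#     _escape_backslashes(r"x\1 {{ a | regex_replace('b', '\1') }}", env)
#     # -> "x\1 {{ a | regex_replace('b', '\\1') }}"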
def _create_overlay(data, overrides, jinja_env):
if overrides is None:
overrides = {}
try:
has_override_header = data.startswith(JINJA2_OVERRIDE)
except (TypeError, AttributeError):
has_override_header = False
if overrides or has_override_header:
overlay = jinja_env.overlay(**overrides)
else:
overlay = jinja_env
# Get jinja env overrides from template
if has_override_header:
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol + 1:]
for pair in line.split(','):
if ':' not in pair:
raise AnsibleError("failed to parse jinja2 override '%s'."
" Did you use something different from colon as key-value separator?" % pair.strip())
(key, val) = pair.split(':', 1)
key = key.strip()
if hasattr(overlay, key):
setattr(overlay, key, ast.literal_eval(val.strip()))
else:
display.warning(f"Could not find Jinja2 environment setting to override: '{key}'")
return data, overlay
def is_possibly_template(data, jinja_env):
"""Determines if a string looks like a template, by seeing if it
contains a jinja2 start delimiter. Does not guarantee that the string
is actually a template.
This is different than ``is_template`` which is more strict.
This method may return ``True`` on a string that is not templatable.
Useful when guarding passing a string for templating, but when
you want to allow the templating engine to make the final
assessment which may result in ``TemplateSyntaxError``.
"""
if isinstance(data, string_types):
for marker in (jinja_env.block_start_string, jinja_env.variable_start_string, jinja_env.comment_start_string):
if marker in data:
return True
return False
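# --- Editor's sketch, not upstream code: quick-check vs. strict check. ---
# With default Jinja2 delimiters ('{%', '{{', '{#'):
#
#     is_possibly_template('{{ x }}', env)  # True
#     is_possibly_template('{{ oops', env)  # True  (may still fail to lex/parse)
#     is_possibly_template('plain', env)    # False
#
# is_template() below is stricter: it also demands a matching end token.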
def is_template(data, jinja_env):
"""This function attempts to quickly detect whether a value is a jinja2
template. To do so, we look for the first 2 matching jinja2 tokens for
start and end delimiters.
"""
found = None
start = True
comment = False
d2 = jinja_env.preprocess(data)
# Quick check to see if this is remotely like a template before doing
# more expensive investigation.
if not is_possibly_template(d2, jinja_env):
return False
# This wraps a lot of code, but this is due to lex returning a generator
# so we may get an exception at any part of the loop
try:
for token in jinja_env.lex(d2):
if token[1] in JINJA2_BEGIN_TOKENS:
if start and token[1] == 'comment_begin':
# Comments can wrap other token types
comment = True
start = False
                # Example: variable_begin -> variable
found = token[1].split('_')[0]
elif token[1] in JINJA2_END_TOKENS:
if token[1].split('_')[0] == found:
return True
elif comment:
continue
return False
except TemplateSyntaxError:
return False
return False
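# A quick sketch of how the two checks differ (hypothetical doctest, assuming
# a default-configured environment as ``env``):
#
#   >>> is_possibly_template("{# unterminated", env)
#   True    # the cheap check only looks for a start marker
#   >>> is_template("{# unterminated", env)
#   False   # lexing fails, so the strict check rejects it
#   >>> is_template("{{ inventory_hostname }}", env)
#   True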
def _count_newlines_from_end(in_str):
"""
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
"""
try:
i = len(in_str)
j = i - 1
while in_str[j] == '\n':
j -= 1
return i - 1 - j
except IndexError:
# Uncommon cases: zero length string and string containing only newlines
return i
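# For example (hypothetical doctest, not part of the original source):
#
#   >>> _count_newlines_from_end('hello\n\n\n')
#   3
#   >>> _count_newlines_from_end('\n\n')
#   2
#   >>> _count_newlines_from_end('')
#   0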
def recursive_check_defined(item):
from jinja2.runtime import Undefined
if isinstance(item, MutableMapping):
for key in item:
recursive_check_defined(item[key])
elif isinstance(item, list):
for i in item:
recursive_check_defined(i)
else:
if isinstance(item, Undefined):
raise AnsibleFilterError("{0} is undefined".format(item))
def _is_rolled(value):
"""Helper method to determine if something is an unrolled generator,
iterator, or similar object
"""
return (
isinstance(value, Iterator) or
isinstance(value, MappingView) or
isinstance(value, RANGE_TYPE)
)
def _unroll_iterator(func):
"""Wrapper function, that intercepts the result of a templating
and auto unrolls a generator, so that users are not required to
explicitly use ``|list`` to unroll.
"""
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if _is_rolled(ret):
return list(ret)
return ret
return functools.update_wrapper(wrapper, func)
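# A small sketch of the effect (hypothetical usage, for illustration only):
#
#   >>> @_unroll_iterator
#   ... def numbers():
#   ...     return range(3)
#   >>> numbers()
#   [0, 1, 2]
#
# Without the wrapper the result would be the ``range`` object itself,
# forcing template authors to append ``| list`` everywhere.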
def _wrap_native_text(func):
"""Wrapper function, that intercepts the result of a filter
and wraps it into NativeJinjaText which is then used
in ``ansible_native_concat`` to indicate that it is a text
which should not be passed into ``literal_eval``.
"""
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
return NativeJinjaText(ret)
return functools.update_wrapper(wrapper, func)
class AnsibleUndefined(StrictUndefined):
"""
A custom Undefined class, which returns further Undefined objects on access,
rather than throwing an exception.
"""
def __getattr__(self, name):
if name == '__UNSAFE__':
# AnsibleUndefined should never be assumed to be unsafe
# This prevents ``hasattr(val, '__UNSAFE__')`` from evaluating to ``True``
raise AttributeError(name)
# Return original Undefined object to preserve the first failure context
return self
def __getitem__(self, key):
# Return original Undefined object to preserve the first failure context
return self
def __repr__(self):
return 'AnsibleUndefined(hint={0!r}, obj={1!r}, name={2!r})'.format(
self._undefined_hint,
self._undefined_obj,
self._undefined_name
)
def __contains__(self, item):
# Return original Undefined object to preserve the first failure context
return self
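# Illustration of the chaining behavior (hypothetical, assuming an undefined
# variable named 'missing_var'):
#
#   >>> u = AnsibleUndefined(name='missing_var')
#   >>> u.foo['bar'].baz    # still AnsibleUndefined, no exception raised yet
#   AnsibleUndefined(hint=None, obj=missing, name='missing_var')
#
# Only a concrete use (str(), comparison, iteration, ...) raises jinja2's
# UndefinedError, preserving the first failure's context in the message.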
class AnsibleContext(Context):
"""
A custom context, which intercepts resolve_or_missing() calls and sets a flag
internally if any variable lookup returns an AnsibleUnsafe value. This
flag is checked post-templating, and (when set) will result in the
final templated result being wrapped in AnsibleUnsafe.
"""
def __init__(self, *args, **kwargs):
super(AnsibleContext, self).__init__(*args, **kwargs)
self.unsafe = False
def _is_unsafe(self, val):
"""
        Our helper function, which also recursively checks dict and
        list entries, because they may be repr'd and contain
        a key or value with jinja2 syntax that would otherwise
        lose the AnsibleUnsafe value.
"""
if isinstance(val, dict):
for key in val.keys():
if self._is_unsafe(val[key]):
return True
elif isinstance(val, list):
for item in val:
if self._is_unsafe(item):
return True
elif getattr(val, '__UNSAFE__', False) is True:
return True
return False
def _update_unsafe(self, val):
if val is not None and not self.unsafe and self._is_unsafe(val):
self.unsafe = True
def resolve_or_missing(self, key):
val = super(AnsibleContext, self).resolve_or_missing(key)
self._update_unsafe(val)
return val
def get_all(self):
"""Return the complete context as a dict including the exported
        variables. For optimization reasons this might not return an
actual copy so be careful with using it.
This is to prevent from running ``AnsibleJ2Vars`` through dict():
``dict(self.parent, **self.vars)``
In Ansible this means that ALL variables would be templated in the
process of re-creating the parent because ``AnsibleJ2Vars`` templates
each variable in its ``__getitem__`` method. Instead we re-create the
parent via ``AnsibleJ2Vars.add_locals`` that creates a new
``AnsibleJ2Vars`` copy without templating each variable.
This will prevent unnecessarily templating unused variables in cases
like setting a local variable and passing it to {% include %}
in a template.
        Also see ``AnsibleJ2Template`` and
https://github.com/pallets/jinja/commit/d67f0fd4cc2a4af08f51f4466150d49da7798729
"""
if not self.vars:
return self.parent
if not self.parent:
return self.vars
if isinstance(self.parent, AnsibleJ2Vars):
return self.parent.add_locals(self.vars)
else:
# can this happen in Ansible?
return dict(self.parent, **self.vars)
class JinjaPluginIntercept(MutableMapping):
""" Simulated dict class that loads Jinja2Plugins at request
otherwise all plugins would need to be loaded a priori.
NOTE: plugin_loader still loads all 'builtin/legacy' at
start so only collection plugins are really at request.
"""
def __init__(self, delegatee, pluginloader, *args, **kwargs):
super(JinjaPluginIntercept, self).__init__(*args, **kwargs)
self._pluginloader = pluginloader
# Jinja environment's mapping of known names (initially just J2 builtins)
self._delegatee = delegatee
# our names take precedence over Jinja's, but let things we've tried to resolve skip the pluginloader
self._seen_it = set()
def __getitem__(self, key):
if not isinstance(key, string_types):
raise ValueError('key must be a string, got %s instead' % type(key))
original_exc = None
if key not in self._seen_it:
            # This looks too early to set this -- it isn't. Setting it here keeps requests for Jinja builtins from
            # going through the pluginloader more than once, which is extremely slow for something that won't ever succeed.
self._seen_it.add(key)
plugin = None
try:
plugin = self._pluginloader.get(key)
except (AnsibleError, KeyError) as e:
original_exc = e
except Exception as e:
display.vvvv('Unexpected plugin load (%s) exception: %s' % (key, to_native(e)))
raise e
# if a plugin was found/loaded
if plugin:
# set in filter cache and avoid expensive plugin load
self._delegatee[key] = plugin.j2_function
# raise template syntax error if we could not find ours or jinja2 one
try:
func = self._delegatee[key]
except KeyError as e:
self._seen_it.remove(key)
raise TemplateSyntaxError('Could not load "%s": %s' % (key, to_native(original_exc or e)), 0)
        # if we do have a func and it is a filter, it needs wrapping
        if self._pluginloader.type == 'filter':
            # filters need wrapping
if key in C.STRING_TYPE_FILTERS:
# avoid literal_eval when you WANT strings
func = _wrap_native_text(func)
else:
# conditionally unroll iterators/generators to avoid having to use `|list` after every filter
func = _unroll_iterator(func)
return func
def __setitem__(self, key, value):
return self._delegatee.__setitem__(key, value)
def __delitem__(self, key):
raise NotImplementedError()
def __iter__(self):
# not strictly accurate since we're not counting dynamically-loaded values
return iter(self._delegatee)
def __len__(self):
# not strictly accurate since we're not counting dynamically-loaded values
return len(self._delegatee)
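# In practice this means a collection filter is resolved lazily on first
# lookup rather than at environment construction, e.g. (hypothetical usage):
#
#   >>> env = AnsibleEnvironment()
#   >>> func = env.filters['ansible.builtin.b64encode']   # pluginloader hit
#   >>> func('ansible')
#   'YW5zaWJsZQ=='
#
# Subsequent lookups of the same key are served directly from _delegatee.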
def _fail_on_undefined(data):
"""Recursively find an undefined value in a nested data structure
and properly raise the undefined exception.
"""
if isinstance(data, Mapping):
for value in data.values():
_fail_on_undefined(value)
elif is_sequence(data):
for item in data:
_fail_on_undefined(item)
else:
if isinstance(data, StrictUndefined):
# To actually raise the undefined exception we need to
# access the undefined object otherwise the exception would
# be raised on the next access which might not be properly
# handled.
# See https://github.com/ansible/ansible/issues/52158
# and StrictUndefined implementation in upstream Jinja2.
str(data)
return data
@_unroll_iterator
def _ansible_finalize(thing):
"""A custom finalize function for jinja2, which prevents None from being
returned. This avoids a string of ``"None"`` as ``None`` has no
importance in YAML.
The function is decorated with ``_unroll_iterator`` so that users are not
required to explicitly use ``|list`` to unroll a generator. This only
affects the scenario where the final result of templating
is a generator, e.g. ``range``, ``dict.items()`` and so on. Filters
which can produce a generator in the middle of a template are already
    wrapped with ``_unroll_iterator`` in ``JinjaPluginIntercept``.
"""
return thing if _fail_on_undefined(thing) is not None else ''
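# Sketch of the finalize behavior (hypothetical examples):
#
#   >>> _ansible_finalize(None)
#   ''
#   >>> _ansible_finalize(range(2))
#   [0, 1]
#
# i.e. ``None`` never leaks into rendered output as the string "None", and a
# top-level generator result is unrolled for the user.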
class AnsibleEnvironment(NativeEnvironment):
"""
Our custom environment, which simply allows us to override the class-level
values for the Template and Context classes used by jinja2 internally.
"""
context_class = AnsibleContext
template_class = AnsibleJ2Template
concat = staticmethod(ansible_eval_concat) # type: ignore[assignment]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filters = JinjaPluginIntercept(self.filters, filter_loader)
self.tests = JinjaPluginIntercept(self.tests, test_loader)
self.trim_blocks = True
self.undefined = AnsibleUndefined
self.finalize = _ansible_finalize
class AnsibleNativeEnvironment(AnsibleEnvironment):
concat = staticmethod(ansible_native_concat) # type: ignore[assignment]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.finalize = _unroll_iterator(_fail_on_undefined)
class Templar:
"""
The main class for templating, with the main entry-point of template().
"""
def __init__(self, loader, variables=None):
self._loader = loader
self._available_variables = {} if variables is None else variables
self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
environment_class = AnsibleNativeEnvironment if C.DEFAULT_JINJA2_NATIVE else AnsibleEnvironment
self.environment = environment_class(
extensions=self._get_extensions(),
loader=FileSystemLoader(loader.get_basedir() if loader else '.'),
)
self.environment.template_class.environment_class = environment_class
# Custom globals
self.environment.globals['lookup'] = self._lookup
self.environment.globals['query'] = self.environment.globals['q'] = self._query_lookup
self.environment.globals['now'] = self._now_datetime
self.environment.globals['undef'] = self._make_undefined
# the current rendering context under which the templar class is working
self.cur_context = None
# this regex is re-compiled each time variable_start_string and variable_end_string are possibly changed
self._compile_single_var(self.environment)
self.jinja2_native = C.DEFAULT_JINJA2_NATIVE
def _compile_single_var(self, env):
self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (env.variable_start_string, env.variable_end_string))
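    # The SINGLE_VAR regex above only matches a lone variable reference such
    # as "{{ foo }}"; anything with filters, attribute access or surrounding
    # text falls through to full Jinja2 rendering. A hypothetical sketch:
    #
    #   >>> templar.SINGLE_VAR.match("{{ foo }}").group(1)
    #   'foo'
    #   >>> templar.SINGLE_VAR.match("{{ foo | int }}") is None
    #   True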
def copy_with_new_env(self, environment_class=AnsibleEnvironment, **kwargs):
r"""Creates a new copy of Templar with a new environment.
:kwarg environment_class: Environment class used for creating a new environment.
:kwarg \*\*kwargs: Optional arguments for the new environment that override existing
environment attributes.
:returns: Copy of Templar with updated environment.
"""
        # We need to use __new__ to skip __init__, mainly to avoid creating
        # a new environment there only to override it below
new_env = object.__new__(environment_class)
new_env.__dict__.update(self.environment.__dict__)
new_templar = object.__new__(Templar)
new_templar.__dict__.update(self.__dict__)
new_templar.environment = new_env
new_templar.jinja2_native = environment_class is AnsibleNativeEnvironment
mapping = {
'available_variables': new_templar,
'searchpath': new_env.loader,
}
for key, value in kwargs.items():
obj = mapping.get(key, new_env)
try:
if value is not None:
setattr(obj, key, value)
except AttributeError:
# Ignore invalid attrs
pass
return new_templar
def _get_extensions(self):
"""
Return jinja2 extensions to load.
If some extensions are set via jinja_extensions in ansible.cfg, we try
to load them with the jinja environment.
"""
jinja_exts = []
if C.DEFAULT_JINJA2_EXTENSIONS:
# make sure the configuration directive doesn't contain spaces
            # and split the extensions into a list
jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
return jinja_exts
@property
def available_variables(self):
return self._available_variables
@available_variables.setter
def available_variables(self, variables):
"""
        Sets the mapping of template variables this Templar instance will use
        to template things, so we don't have to pass them around between
        internal methods.
"""
if not isinstance(variables, Mapping):
raise AnsibleAssertionError("the type of 'variables' should be a Mapping but was a %s" % (type(variables)))
self._available_variables = variables
@contextmanager
def set_temporary_context(self, **kwargs):
"""Context manager used to set temporary templating context, without having to worry about resetting
original values afterward
        Use a keyword that maps to the attr you are setting. Applies to ``self.environment`` by default;
        to set context on another object, it must be in ``mapping``.
"""
mapping = {
'available_variables': self,
'searchpath': self.environment.loader,
}
original = {}
for key, value in kwargs.items():
obj = mapping.get(key, self.environment)
try:
original[key] = getattr(obj, key)
if value is not None:
setattr(obj, key, value)
except AttributeError:
# Ignore invalid attrs
pass
yield
for key in original:
obj = mapping.get(key, self.environment)
setattr(obj, key, original[key])
def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
convert_data=True, static_vars=None, cache=None, disable_lookups=False):
"""
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
before being sent through the template engine.
"""
static_vars = [] if static_vars is None else static_vars
if cache is not None:
display.deprecated("The `cache` option to `Templar.template` is no longer functional, and will be removed in a future release.", version='2.18')
# Don't template unsafe variables, just return them.
if hasattr(variable, '__UNSAFE__'):
return variable
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
if convert_bare:
variable = self._convert_bare_variable(variable)
if isinstance(variable, string_types):
if not self.is_possibly_template(variable, overrides):
return variable
# Check to see if the string we are trying to render is just referencing a single
# var. In this case we don't want to accidentally change the type of the variable
            # to a string by using the jinja template renderer. We just want to pass it through untouched.
only_one = self.SINGLE_VAR.match(variable)
if only_one:
var_name = only_one.group(1)
if var_name in self._available_variables:
resolved_val = self._available_variables[var_name]
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
elif resolved_val is None:
return C.DEFAULT_NULL_REPRESENTATION
result = self.do_template(
variable,
preserve_trailing_newlines=preserve_trailing_newlines,
escape_backslashes=escape_backslashes,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
convert_data=convert_data,
)
self._compile_single_var(self.environment)
return result
elif is_sequence(variable):
return [self.template(
v,
preserve_trailing_newlines=preserve_trailing_newlines,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
) for v in variable]
elif isinstance(variable, Mapping):
d = {}
# we don't use iteritems() here to avoid problems if the underlying dict
# changes sizes due to the templating, which can happen with hostvars
for k in variable.keys():
if k not in static_vars:
d[k] = self.template(
variable[k],
preserve_trailing_newlines=preserve_trailing_newlines,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
)
else:
d[k] = variable[k]
return d
else:
return variable
def is_template(self, data):
"""lets us know if data has a template"""
if isinstance(data, string_types):
return is_template(data, self.environment)
elif isinstance(data, (list, tuple)):
for v in data:
if self.is_template(v):
return True
elif isinstance(data, dict):
for k in data:
if self.is_template(k) or self.is_template(data[k]):
return True
return False
templatable = is_template
def is_possibly_template(self, data, overrides=None):
data, env = _create_overlay(data, overrides, self.environment)
return is_possibly_template(data, env)
def _convert_bare_variable(self, variable):
"""
Wraps a bare string, which may have an attribute portion (ie. foo.bar)
in jinja2 variable braces so that it is evaluated properly.
"""
if isinstance(variable, string_types):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
# the variable didn't meet the conditions to be converted,
# so just return it as-is
return variable
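    # For instance (hypothetical, assuming 'foo' is a known variable and
    # 'unknown_name' is not):
    #
    #   >>> templar._convert_bare_variable('foo.bar')
    #   '{{foo.bar}}'
    #   >>> templar._convert_bare_variable('unknown_name')
    #   'unknown_name'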
def _fail_lookup(self, name, *args, **kwargs):
raise AnsibleError("The lookup `%s` was found, however lookups were disabled from templating" % name)
def _now_datetime(self, utc=False, fmt=None):
"""jinja2 global function to return current datetime, potentially formatted via strftime"""
if utc:
now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
else:
now = datetime.datetime.now()
if fmt:
return now.strftime(fmt)
return now
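    # In a template, this global is used like (illustrative only):
    #
    #   {{ now(utc=True, fmt='%Y-%m-%d %H:%M:%S') }}
    #
    # which renders the current UTC time through strftime.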
def _query_lookup(self, name, /, *args, **kwargs):
""" wrapper for lookup, force wantlist true"""
kwargs['wantlist'] = True
return self._lookup(name, *args, **kwargs)
def _lookup(self, name, /, *args, **kwargs):
instance = lookup_loader.get(name, loader=self._loader, templar=self)
if instance is None:
raise AnsibleError("lookup plugin (%s) not found" % name)
wantlist = kwargs.pop('wantlist', False)
allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
errors = kwargs.pop('errors', 'strict')
loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, fail_on_undefined=True, convert_bare=False)
# safely catch run failures per #5059
try:
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
except (AnsibleUndefinedVariable, UndefinedError) as e:
raise AnsibleUndefinedVariable(e)
except AnsibleOptionsError as e:
# invalid options given to lookup, just reraise
raise e
except AnsibleLookupError as e:
# lookup handled error but still decided to bail
msg = 'Lookup failed but the error is being ignored: %s' % to_native(e)
if errors == 'warn':
display.warning(msg)
elif errors == 'ignore':
display.display(msg, log_only=True)
else:
raise e
return [] if wantlist else None
except Exception as e:
# errors not handled by lookup
msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
(name, type(e), to_text(e))
if errors == 'warn':
display.warning(msg)
elif errors == 'ignore':
display.display(msg, log_only=True)
else:
display.vvv('exception during Jinja2 execution: {0}'.format(format_exc()))
raise AnsibleError(to_native(msg), orig_exc=e)
return [] if wantlist else None
if not is_sequence(ran):
display.deprecated(
f'The lookup plugin \'{name}\' was expected to return a list, got \'{type(ran)}\' instead. '
f'The lookup plugin \'{name}\' needs to be changed to return a list. '
'This will be an error in Ansible 2.18',
version='2.18'
)
if ran and allow_unsafe is False:
if self.cur_context:
self.cur_context.unsafe = True
if wantlist:
return wrap_var(ran)
try:
if isinstance(ran[0], NativeJinjaText):
ran = wrap_var(NativeJinjaText(",".join(ran)))
else:
ran = wrap_var(",".join(ran))
except TypeError:
# Lookup Plugins should always return lists. Throw an error if that's not
# the case:
if not isinstance(ran, Sequence):
raise AnsibleError("The lookup plugin '%s' did not return a list."
% name)
# The TypeError we can recover from is when the value *inside* of the list
# is not a string
if len(ran) == 1:
ran = wrap_var(ran[0])
else:
ran = wrap_var(ran)
except KeyError:
# Lookup Plugin returned a dict. Return comma-separated string of keys
# for backwards compat.
# FIXME this can be removed when support for non-list return types is removed.
# See https://github.com/ansible/ansible/pull/77789
ran = wrap_var(",".join(ran))
return ran
def _make_undefined(self, hint=None):
from jinja2.runtime import Undefined
if hint is None or isinstance(hint, Undefined) or hint == '':
hint = "Mandatory variable has not been overridden"
return AnsibleUndefined(hint)
def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False,
convert_data=False):
if self.jinja2_native and not isinstance(data, string_types):
return data
# For preserving the number of input newlines in the output (used
# later in this method)
data_newlines = _count_newlines_from_end(data)
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
try:
# NOTE Creating an overlay that lives only inside do_template means that overrides are not applied
# when templating nested variables in AnsibleJ2Vars where Templar.environment is used, not the overlay.
data, myenv = _create_overlay(data, overrides, self.environment)
# in case delimiters change
self._compile_single_var(myenv)
if escape_backslashes:
# Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
data = _escape_backslashes(data, myenv)
try:
t = myenv.from_string(data)
except (TemplateSyntaxError, SyntaxError) as e:
raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)), orig_exc=e)
except Exception as e:
if 'recursion' in to_native(e):
raise AnsibleError("recursive loop detected in template string: %s" % to_native(data), orig_exc=e)
else:
return data
if disable_lookups:
t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
jvars = AnsibleJ2Vars(self, t.globals)
# In case this is a recursive call to do_template we need to
# save/restore cur_context to prevent overriding __UNSAFE__.
cached_context = self.cur_context
# In case this is a recursive call and we set different concat
# function up the stack, reset it in case the value of convert_data
# changed in this call
myenv.concat = myenv.__class__.concat
# the concat function is set for each Ansible environment,
# however for convert_data=False we need to use the concat
# function that avoids any evaluation and set it temporarily
# on the environment so it is used correctly even when
# the concat function is called internally in Jinja,
# most notably for macro execution
if not self.jinja2_native and not convert_data:
myenv.concat = ansible_concat
self.cur_context = t.new_context(jvars, shared=True)
rf = t.root_render_func(self.cur_context)
try:
res = myenv.concat(rf)
unsafe = getattr(self.cur_context, 'unsafe', False)
if unsafe:
res = wrap_var(res)
except TypeError as te:
if 'AnsibleUndefined' in to_native(te):
errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
raise AnsibleUndefinedVariable(errmsg, orig_exc=te)
else:
display.debug("failing because of a type error, template data is: %s" % to_text(data))
raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)), orig_exc=te)
finally:
self.cur_context = cached_context
if isinstance(res, string_types) and preserve_trailing_newlines:
# The low level calls above do not preserve the newline
                # characters at the end of the input data, so we
                # calculate the difference in newlines and append them
# to the resulting output for parity
#
# Using Environment's keep_trailing_newline instead would
# result in change in behavior when trailing newlines
# would be kept also for included templates, for example:
# "Hello {% include 'world.txt' %}!" would render as
# "Hello world\n!\n" instead of "Hello world!\n".
res_newlines = _count_newlines_from_end(res)
if data_newlines > res_newlines:
res += myenv.newline_sequence * (data_newlines - res_newlines)
if unsafe:
res = wrap_var(res)
return res
except UndefinedError as e:
if fail_on_undefined:
raise AnsibleUndefinedVariable(e)
display.debug("Ignoring undefined failure: %s" % to_text(e))
return data
except AnsibleUndefinedVariable as e:
if fail_on_undefined:
raise
display.debug("Ignoring undefined failure: %s" % to_text(e))
return data
# for backwards compatibility in case anyone is using old private method directly
_do_template = do_template
| 40,402 | Python | .py | 840 | 37.834524 | 156 | 0.630308 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,349 | loader.py | ansible_ansible/lib/ansible/plugins/loader.py |
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import glob
import os
import os.path
import pkgutil
import sys
import warnings
from collections import defaultdict, namedtuple
from importlib import import_module
from traceback import format_exc
import ansible.module_utils.compat.typing as t
from .filter import AnsibleJinja2Filter
from .test import AnsibleJinja2Test
from ansible import __version__ as ansible_version
from ansible import constants as C
from ansible.errors import AnsibleError, AnsiblePluginCircularRedirect, AnsiblePluginRemovedError, AnsibleCollectionUnsupportedVersionError
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible.module_utils.six import string_types
from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder, _get_collection_metadata
from ansible.utils.display import Display
from ansible.utils.plugin_docs import add_fragments
# TODO: take the packaging dep, or vendor SpecifierSet?
try:
from packaging.specifiers import SpecifierSet
from packaging.version import Version
except ImportError:
SpecifierSet = None # type: ignore[misc]
Version = None # type: ignore[misc]
import importlib.util
_PLUGIN_FILTERS = defaultdict(frozenset) # type: t.DefaultDict[str, frozenset]
display = Display()
get_with_context_result = namedtuple('get_with_context_result', ['object', 'plugin_load_context'])
def get_all_plugin_loaders():
return [(name, obj) for (name, obj) in globals().items() if isinstance(obj, PluginLoader)]
def add_all_plugin_dirs(path):
""" add any existing plugin dirs in the path provided """
b_path = os.path.expanduser(to_bytes(path, errors='surrogate_or_strict'))
if os.path.isdir(b_path):
for name, obj in get_all_plugin_loaders():
if obj.subdir:
plugin_path = os.path.join(b_path, to_bytes(obj.subdir))
if os.path.isdir(plugin_path):
obj.add_directory(to_text(plugin_path))
else:
display.warning("Ignoring invalid path provided to plugin path: '%s' is not a directory" % to_text(path))
def get_shell_plugin(shell_type=None, executable=None):
if not shell_type:
# default to sh
shell_type = 'sh'
# mostly for backwards compat
if executable:
if isinstance(executable, string_types):
shell_filename = os.path.basename(executable)
try:
shell = shell_loader.get(shell_filename)
except Exception:
shell = None
if shell is None:
for shell in shell_loader.all():
if shell_filename in shell.COMPATIBLE_SHELLS:
shell_type = shell.SHELL_FAMILY
break
else:
raise AnsibleError("Either a shell type or a shell executable must be provided ")
shell = shell_loader.get(shell_type)
if not shell:
raise AnsibleError("Could not find the shell plugin required (%s)." % shell_type)
if executable:
setattr(shell, 'executable', executable)
return shell
def add_dirs_to_loader(which_loader, paths):
loader = getattr(sys.modules[__name__], '%s_loader' % which_loader)
for path in paths:
loader.add_directory(path, with_subdir=True)
class PluginPathContext(object):
def __init__(self, path, internal):
self.path = path
self.internal = internal
class PluginLoadContext(object):
def __init__(self):
self.original_name = None
self.redirect_list = []
self.error_list = []
self.import_error_list = []
self.load_attempts = []
self.pending_redirect = None
self.exit_reason = None
self.plugin_resolved_path = None
self.plugin_resolved_name = None
self.plugin_resolved_collection = None # empty string for resolved plugins from user-supplied paths
self.deprecated = False
self.removal_date = None
self.removal_version = None
self.deprecation_warnings = []
self.resolved = False
self._resolved_fqcn = None
self.action_plugin = None
@property
def resolved_fqcn(self):
if not self.resolved:
return
if not self._resolved_fqcn:
final_plugin = self.redirect_list[-1]
if AnsibleCollectionRef.is_valid_fqcr(final_plugin) and final_plugin.startswith('ansible.legacy.'):
final_plugin = final_plugin.split('ansible.legacy.')[-1]
if self.plugin_resolved_collection and not AnsibleCollectionRef.is_valid_fqcr(final_plugin):
final_plugin = self.plugin_resolved_collection + '.' + final_plugin
self._resolved_fqcn = final_plugin
return self._resolved_fqcn
def record_deprecation(self, name, deprecation, collection_name):
if not deprecation:
return self
# The `or ''` instead of using `.get(..., '')` makes sure that even if the user explicitly
# sets `warning_text` to `~` (None) or `false`, we still get an empty string.
warning_text = deprecation.get('warning_text', None) or ''
removal_date = deprecation.get('removal_date', None)
removal_version = deprecation.get('removal_version', None)
# If both removal_date and removal_version are specified, use removal_date
if removal_date is not None:
removal_version = None
warning_text = '{0} has been deprecated.{1}{2}'.format(name, ' ' if warning_text else '', warning_text)
display.deprecated(warning_text, date=removal_date, version=removal_version, collection_name=collection_name)
self.deprecated = True
if removal_date:
self.removal_date = removal_date
if removal_version:
self.removal_version = removal_version
self.deprecation_warnings.append(warning_text)
return self
def resolve(self, resolved_name, resolved_path, resolved_collection, exit_reason, action_plugin):
self.pending_redirect = None
self.plugin_resolved_name = resolved_name
self.plugin_resolved_path = resolved_path
self.plugin_resolved_collection = resolved_collection
self.exit_reason = exit_reason
self.resolved = True
self.action_plugin = action_plugin
return self
def redirect(self, redirect_name):
self.pending_redirect = redirect_name
self.exit_reason = 'pending redirect resolution from {0} to {1}'.format(self.original_name, redirect_name)
self.resolved = False
return self
def nope(self, exit_reason):
self.pending_redirect = None
self.exit_reason = exit_reason
self.resolved = False
return self
class PluginLoader:
"""
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of play basedirs, configured
paths, and the python path. The first match is used.
"""
def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
aliases = {} if aliases is None else aliases
self.class_name = class_name
self.base_class = required_base_class
self.package = package
self.subdir = subdir
# FIXME: remove alias dict in favor of alias by symlink?
self.aliases = aliases
if config and not isinstance(config, list):
config = [config]
elif not config:
config = []
self.config = config
if class_name not in MODULE_CACHE:
MODULE_CACHE[class_name] = {}
if class_name not in PATH_CACHE:
PATH_CACHE[class_name] = None
if class_name not in PLUGIN_PATH_CACHE:
PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
# hold dirs added at runtime outside of config
self._extra_dirs = []
# caches
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._plugin_instance_cache = {} if self.subdir == 'vars_plugins' else None
self._searched_paths = set()
@property
def type(self):
return AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
def __repr__(self):
return 'PluginLoader(type={0})'.format(self.type)
def _clear_caches(self):
if C.OLD_PLUGIN_CACHE_CLEARING:
self._paths = None
else:
# reset global caches
MODULE_CACHE[self.class_name] = {}
PATH_CACHE[self.class_name] = None
PLUGIN_PATH_CACHE[self.class_name] = defaultdict(dict)
# reset internal caches
self._module_cache = MODULE_CACHE[self.class_name]
self._paths = PATH_CACHE[self.class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[self.class_name]
self._plugin_instance_cache = {} if self.subdir == 'vars_plugins' else None
self._searched_paths = set()
def __setstate__(self, data):
"""
Deserializer.
"""
class_name = data.get('class_name')
package = data.get('package')
config = data.get('config')
subdir = data.get('subdir')
aliases = data.get('aliases')
base_class = data.get('base_class')
PATH_CACHE[class_name] = data.get('PATH_CACHE')
PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
self.__init__(class_name, package, config, subdir, aliases, base_class)
self._extra_dirs = data.get('_extra_dirs', [])
self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
"""
Serializer.
"""
return dict(
class_name=self.class_name,
base_class=self.base_class,
package=self.package,
config=self.config,
subdir=self.subdir,
aliases=self.aliases,
_extra_dirs=self._extra_dirs,
_searched_paths=self._searched_paths,
PATH_CACHE=PATH_CACHE[self.class_name],
PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
)
def format_paths(self, paths):
""" Returns a string suitable for printing of the search path """
# Uses a list to get the order right
ret = []
for i in paths:
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
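    # A small sketch of the order-preserving de-duplication (hypothetical
    # paths, POSIX os.pathsep assumed):
    #
    #   >>> loader.format_paths(['/a', '/b', '/a', '/c'])
    #   '/a:/b:/c'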
def print_paths(self):
return self.format_paths(self._get_paths(subdirs=False))
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir, followlinks=True):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root, x))
return results
def _get_package_paths(self, subdirs=True):
""" Gets the path of a Python package """
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
for parent_mod in parts:
m = getattr(m, parent_mod)
self.package_path = to_text(os.path.dirname(m.__file__), errors='surrogate_or_strict')
if subdirs:
return self._all_directories(self.package_path)
return [self.package_path]
def _get_paths_with_context(self, subdirs=True):
""" Return a list of PluginPathContext objects to search for plugins in """
# FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
# In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
# which always calls it with subdirs=False. So there currently isn't a problem with this caching.
if self._paths is not None:
return self._paths
ret = [PluginPathContext(p, False) for p in self._extra_dirs]
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
for path in self.config:
path = os.path.abspath(os.path.expanduser(path))
if subdirs:
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
c = to_text(c, errors='surrogate_or_strict')
if os.path.isdir(c) and c not in ret:
ret.append(PluginPathContext(c, False))
path = to_text(path, errors='surrogate_or_strict')
if path not in ret:
ret.append(PluginPathContext(path, False))
# look for any plugins installed in the package subtree
# Note package path always gets added last so that every other type of
# path is searched before it.
ret.extend([PluginPathContext(p, True) for p in self._get_package_paths(subdirs=subdirs)])
        # HACK: because powershell modules are in the same directory
        # hierarchy as other modules, we have to process them last. This is
        # because powershell only works on windows but the other modules work
        # anywhere (possibly including windows if the correct language
        # interpreter is installed). The non-powershell modules can have any
        # file extension, so powershell modules would otherwise be picked up
        # by that fuzzy match. The non-hack way to fix this is to give
        # powershell modules a different PluginLoader/ModuleLoader, but that
        # requires changing other things too (known things to change would be
        # PATHS_CACHE, PLUGIN_PATHS_CACHE, and MODULE_CACHE). Since those
        # three dicts key on the class_name, and neither regular modules nor
        # powershell modules have class_names, they would not work as written.
#
# The expected sort order is paths in the order in 'ret' with paths ending in '/windows' at the end,
# also in the original order they were found in 'ret'.
# The .sort() method is guaranteed to be stable, so original order is preserved.
ret.sort(key=lambda p: p.path.endswith('/windows'))
# cache and return the result
self._paths = ret
return ret
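    # The stable-sort trick above relies on False sorting before True, so a
    # boolean key only moves the matching paths to the end while preserving
    # their relative order. A minimal sketch with plain strings:
    #
    #   >>> paths = ['/x/windows', '/a', '/b/windows', '/c']
    #   >>> paths.sort(key=lambda p: p.endswith('/windows'))
    #   >>> paths
    #   ['/a', '/c', '/x/windows', '/b/windows']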
def _get_paths(self, subdirs=True):
""" Return a list of paths to search for plugins in """
paths_with_context = self._get_paths_with_context(subdirs=subdirs)
return [path_with_context.path for path_with_context in paths_with_context]
def _load_config_defs(self, name, module, path):
""" Reads plugin docs to find configuration setting definitions, to push to config manager for later use """
# plugins w/o class name don't support config
if self.class_name:
type_name = get_plugin_class(self.class_name)
# if type name != 'module_doc_fragment':
if type_name in C.CONFIGURABLE_PLUGINS and not C.config.has_configuration_definition(type_name, name):
dstring = AnsibleLoader(getattr(module, 'DOCUMENTATION', ''), file_name=path).get_single_data()
# TODO: allow configurable plugins to use sidecar
# if not dstring:
# filename, cn = find_plugin_docfile( name, type_name, self, [os.path.dirname(path)], C.YAML_DOC_EXTENSIONS)
# # TODO: dstring = AnsibleLoader(, file_name=path).get_single_data()
if dstring:
add_fragments(dstring, path, fragment_loader=fragment_loader, is_module=(type_name == 'module'))
if 'options' in dstring and isinstance(dstring['options'], dict):
C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
def add_directory(self, directory, with_subdir=False):
""" Adds an additional directory to the search path """
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._clear_caches()
display.debug('Added %s to loader search path' % (directory))
def _query_collection_routing_meta(self, acr, plugin_type, extension=None):
collection_pkg = import_module(acr.n_python_collection_package_name)
if not collection_pkg:
return None
# FIXME: shouldn't need this...
try:
# force any type-specific metadata postprocessing to occur
import_module(acr.n_python_collection_package_name + '.plugins.{0}'.format(plugin_type))
except ImportError:
pass
# this will be created by the collection PEP302 loader
collection_meta = getattr(collection_pkg, '_collection_meta', None)
if not collection_meta:
return None
# TODO: add subdirs support
# check for extension-specific entry first (eg 'setup.ps1')
# TODO: str/bytes on extension/name munging
if acr.subdirs:
subdir_qualified_resource = '.'.join([acr.subdirs, acr.resource])
else:
subdir_qualified_resource = acr.resource
entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource + extension, None)
if not entry:
# try for extension-agnostic entry
entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource, None)
return entry
def _find_fq_plugin(self, fq_name, extension, plugin_load_context, ignore_deprecated=False):
"""Search builtin paths to find a plugin. No external paths are searched,
meaning plugins inside roles inside collections will be ignored.
"""
plugin_load_context.resolved = False
plugin_type = AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
acr = AnsibleCollectionRef.from_fqcr(fq_name, plugin_type)
# check collection metadata to see if any special handling is required for this plugin
routing_metadata = self._query_collection_routing_meta(acr, plugin_type, extension=extension)
action_plugin = None
# TODO: factor this into a wrapper method
if routing_metadata:
deprecation = routing_metadata.get('deprecation', None)
# this will no-op if there's no deprecation metadata for this plugin
if not ignore_deprecated:
plugin_load_context.record_deprecation(fq_name, deprecation, acr.collection)
tombstone = routing_metadata.get('tombstone', None)
# FIXME: clean up text gen
if tombstone:
removal_date = tombstone.get('removal_date')
removal_version = tombstone.get('removal_version')
warning_text = tombstone.get('warning_text') or ''
warning_text = '{0} has been removed.{1}{2}'.format(fq_name, ' ' if warning_text else '', warning_text)
removed_msg = display.get_deprecation_message(msg=warning_text, version=removal_version,
date=removal_date, removed=True,
collection_name=acr.collection)
plugin_load_context.removal_date = removal_date
plugin_load_context.removal_version = removal_version
plugin_load_context.resolved = True
plugin_load_context.exit_reason = removed_msg
raise AnsiblePluginRemovedError(removed_msg, plugin_load_context=plugin_load_context)
redirect = routing_metadata.get('redirect', None)
if redirect:
# Prevent mystery redirects that would be determined by the collections keyword
if not AnsibleCollectionRef.is_valid_fqcr(redirect):
raise AnsibleError(
f"Collection {acr.collection} contains invalid redirect for {fq_name}: {redirect}. "
"Redirects must use fully qualified collection names."
)
# FIXME: remove once this is covered in debug or whatever
display.vv("redirecting (type: {0}) {1} to {2}".format(plugin_type, fq_name, redirect))
# The name doing the redirection is added at the beginning of _resolve_plugin_step,
# but if the unqualified name is used in conjunction with the collections keyword, only
# the unqualified name is in the redirect list.
if fq_name not in plugin_load_context.redirect_list:
plugin_load_context.redirect_list.append(fq_name)
return plugin_load_context.redirect(redirect)
# TODO: non-FQCN case, do we support `.` prefix for current collection, assume it with no dots, require it for subdirs in current, or ?
if self.type == 'modules':
action_plugin = routing_metadata.get('action_plugin')
n_resource = to_native(acr.resource, errors='strict')
# we want this before the extension is added
full_name = '{0}.{1}'.format(acr.n_python_package_name, n_resource)
if extension:
n_resource += extension
pkg = sys.modules.get(acr.n_python_package_name)
if not pkg:
# FIXME: there must be cheaper/safer way to do this
try:
pkg = import_module(acr.n_python_package_name)
except ImportError:
return plugin_load_context.nope('Python package {0} not found'.format(acr.n_python_package_name))
pkg_path = os.path.dirname(pkg.__file__)
n_resource_path = os.path.join(pkg_path, n_resource)
# FIXME: and is file or file link or ...
if os.path.exists(n_resource_path):
return plugin_load_context.resolve(
full_name, to_text(n_resource_path), acr.collection, 'found exact match for {0} in {1}'.format(full_name, acr.collection), action_plugin)
if extension:
# the request was extension-specific, don't try for an extensionless match
return plugin_load_context.nope('no match for {0} in {1}'.format(to_text(n_resource), acr.collection))
# look for any matching extension in the package location (sans filter)
found_files = [f
for f in glob.iglob(os.path.join(pkg_path, n_resource) + '.*')
if os.path.isfile(f) and not f.endswith(C.MODULE_IGNORE_EXTS)]
if not found_files:
return plugin_load_context.nope('failed fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))
found_files = sorted(found_files) # sort to ensure deterministic results, with the shortest match first
if len(found_files) > 1:
display.debug('Found several possible candidates for the plugin but using first: %s' % ','.join(found_files))
return plugin_load_context.resolve(
full_name, to_text(found_files[0]), acr.collection,
'found fuzzy extension match for {0} in {1}'.format(full_name, acr.collection), action_plugin)
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
""" Find a plugin named name """
result = self.find_plugin_with_context(name, mod_type, ignore_deprecated, check_aliases, collection_list)
if result.resolved and result.plugin_resolved_path:
return result.plugin_resolved_path
return None
def find_plugin_with_context(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
""" Find a plugin named name, returning contextual info about the load, recursively resolving redirection """
plugin_load_context = PluginLoadContext()
plugin_load_context.original_name = name
while True:
result = self._resolve_plugin_step(name, mod_type, ignore_deprecated, check_aliases, collection_list, plugin_load_context=plugin_load_context)
if result.pending_redirect:
if result.pending_redirect in result.redirect_list:
raise AnsiblePluginCircularRedirect('plugin redirect loop resolving {0} (path: {1})'.format(result.original_name, result.redirect_list))
name = result.pending_redirect
result.pending_redirect = None
plugin_load_context = result
else:
break
# TODO: smuggle these to the controller when we're in a worker, reduce noise from normal things like missing plugin packages during collection search
if plugin_load_context.error_list:
display.warning("errors were encountered during the plugin load for {0}:\n{1}".format(name, plugin_load_context.error_list))
# TODO: display/return import_error_list? Only useful for forensics...
# FIXME: store structured deprecation data in PluginLoadContext and use display.deprecate
# if plugin_load_context.deprecated and C.config.get_config_value('DEPRECATION_WARNINGS'):
# for dw in plugin_load_context.deprecation_warnings:
# # TODO: need to smuggle these to the controller if we're in a worker context
# display.warning('[DEPRECATION WARNING] ' + dw)
return plugin_load_context
# FIXME: name bikeshed
def _resolve_plugin_step(self, name, mod_type='', ignore_deprecated=False,
check_aliases=False, collection_list=None, plugin_load_context=PluginLoadContext()):
if not plugin_load_context:
raise ValueError('A PluginLoadContext is required')
plugin_load_context.redirect_list.append(name)
plugin_load_context.resolved = False
if name in _PLUGIN_FILTERS[self.package]:
plugin_load_context.exit_reason = '{0} matched a defined plugin filter'.format(name)
return plugin_load_context
if mod_type:
suffix = mod_type
elif self.class_name:
# Ansible plugins that run in the controller process (most plugins)
suffix = '.py'
else:
# Only Ansible Modules. Ansible modules can be any executable so
# they can have any suffix
suffix = ''
        # FIXME: need this right now so we can still load shipped PS module_utils -- come up with a more robust solution
if (AnsibleCollectionRef.is_valid_fqcr(name) or collection_list) and not name.startswith('Ansible'):
if '.' in name or not collection_list:
candidates = [name]
else:
candidates = ['{0}.{1}'.format(c, name) for c in collection_list]
for candidate_name in candidates:
try:
plugin_load_context.load_attempts.append(candidate_name)
# HACK: refactor this properly
if candidate_name.startswith('ansible.legacy'):
# 'ansible.legacy' refers to the plugin finding behavior used before collections existed.
# They need to search 'library' and the various '*_plugins' directories in order to find the file.
plugin_load_context = self._find_plugin_legacy(name.removeprefix('ansible.legacy.'),
plugin_load_context, ignore_deprecated, check_aliases, suffix)
else:
# 'ansible.builtin' should be handled here. This means only internal, or builtin, paths are searched.
plugin_load_context = self._find_fq_plugin(candidate_name, suffix, plugin_load_context=plugin_load_context,
ignore_deprecated=ignore_deprecated)
# Pending redirects are added to the redirect_list at the beginning of _resolve_plugin_step.
# Once redirects are resolved, ensure the final FQCN is added here.
# e.g. 'ns.coll.module' is included rather than only 'module' if a collections list is provided:
# - module:
# collections: ['ns.coll']
if plugin_load_context.resolved and candidate_name not in plugin_load_context.redirect_list:
plugin_load_context.redirect_list.append(candidate_name)
if plugin_load_context.resolved or plugin_load_context.pending_redirect: # if we got an answer or need to chase down a redirect, return
return plugin_load_context
except (AnsiblePluginRemovedError, AnsiblePluginCircularRedirect, AnsibleCollectionUnsupportedVersionError):
# these are generally fatal, let them fly
raise
except ImportError as ie:
plugin_load_context.import_error_list.append(ie)
except Exception as ex:
# FIXME: keep actual errors, not just assembled messages
plugin_load_context.error_list.append(to_native(ex))
if plugin_load_context.error_list:
display.debug(msg='plugin lookup for {0} failed; errors: {1}'.format(name, '; '.join(plugin_load_context.error_list)))
plugin_load_context.exit_reason = 'no matches found for {0}'.format(name)
return plugin_load_context
# if we got here, there's no collection list and it's not an FQ name, so do legacy lookup
return self._find_plugin_legacy(name, plugin_load_context, ignore_deprecated, check_aliases, suffix)
def _find_plugin_legacy(self, name, plugin_load_context, ignore_deprecated=False, check_aliases=False, suffix=None):
"""Search library and various *_plugins paths in order to find the file.
        This was the behavior prior to the existence of collections.
"""
plugin_load_context.resolved = False
if check_aliases:
name = self.aliases.get(name, name)
# The particular cache to look for modules within. This matches the
# requested mod_type
pull_cache = self._plugin_path_cache[suffix]
try:
path_with_context = pull_cache[name]
plugin_load_context.plugin_resolved_path = path_with_context.path
plugin_load_context.plugin_resolved_name = name
plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
plugin_load_context._resolved_fqcn = ('ansible.builtin.' + name if path_with_context.internal else name)
plugin_load_context.resolved = True
return plugin_load_context
except KeyError:
# Cache miss. Now let's find the plugin
pass
# TODO: Instead of using the self._paths cache (PATH_CACHE) and
# self._searched_paths we could use an iterator. Before enabling that
# we need to make sure we don't want to add additional directories
# (add_directory()) once we start using the iterator.
# We can use _get_paths_with_context() since add_directory() forces a cache refresh.
for path_with_context in (p for p in self._get_paths_with_context() if p.path not in self._searched_paths and os.path.isdir(to_bytes(p.path))):
path = path_with_context.path
b_path = to_bytes(path)
display.debug('trying %s' % path)
plugin_load_context.load_attempts.append(path)
internal = path_with_context.internal
try:
full_paths = (os.path.join(b_path, f) for f in os.listdir(b_path))
            except OSError as e:
                display.warning("Error accessing plugin paths: %s" % to_text(e))
                # nothing to iterate for this path, so move on to the next one
                continue
for full_path in (to_native(f) for f in full_paths if os.path.isfile(f) and not f.endswith(b'__init__.py')):
full_name = os.path.basename(full_path)
# HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
# FIXME: I believe this is only correct for modules and module_utils.
                # For all other plugins, .pyc and .pyo should be valid
if any(full_path.endswith(x) for x in C.MODULE_IGNORE_EXTS):
continue
splitname = os.path.splitext(full_name)
base_name = splitname[0]
try:
extension = splitname[1]
except IndexError:
extension = ''
# everything downstream expects unicode
full_path = to_text(full_path, errors='surrogate_or_strict')
# Module found, now enter it into the caches that match this file
if base_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][base_name] = PluginPathContext(full_path, internal)
if full_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][full_name] = PluginPathContext(full_path, internal)
if base_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][base_name] = PluginPathContext(full_path, internal)
if full_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][full_name] = PluginPathContext(full_path, internal)
self._searched_paths.add(path)
try:
path_with_context = pull_cache[name]
plugin_load_context.plugin_resolved_path = path_with_context.path
plugin_load_context.plugin_resolved_name = name
plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
plugin_load_context._resolved_fqcn = 'ansible.builtin.' + name if path_with_context.internal else name
plugin_load_context.resolved = True
return plugin_load_context
except KeyError:
# Didn't find the plugin in this directory. Load modules from the next one
pass
# if nothing is found, try finding alias/deprecated
if not name.startswith('_'):
alias_name = '_' + name
# We've already cached all the paths at this point
if alias_name in pull_cache:
path_with_context = pull_cache[alias_name]
if not ignore_deprecated and not os.path.islink(path_with_context.path):
# FIXME: this is not always the case, some are just aliases
display.deprecated('%s is kept for backwards compatibility but usage is discouraged. ' # pylint: disable=ansible-deprecated-no-version
'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
plugin_load_context.plugin_resolved_path = path_with_context.path
plugin_load_context.plugin_resolved_name = alias_name
plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
plugin_load_context._resolved_fqcn = 'ansible.builtin.' + alias_name if path_with_context.internal else alias_name
plugin_load_context.resolved = True
return plugin_load_context
# last ditch, if it's something that can be redirected, look for a builtin redirect before giving up
candidate_fqcr = 'ansible.builtin.{0}'.format(name)
if '.' not in name and AnsibleCollectionRef.is_valid_fqcr(candidate_fqcr):
return self._find_fq_plugin(fq_name=candidate_fqcr, extension=suffix, plugin_load_context=plugin_load_context, ignore_deprecated=ignore_deprecated)
return plugin_load_context.nope('{0} is not eligible for last-chance resolution'.format(name))
def has_plugin(self, name, collection_list=None):
""" Checks if a plugin named name exists """
try:
return self.find_plugin(name, collection_list=collection_list) is not None
except Exception as ex:
if isinstance(ex, AnsibleError):
raise
# log and continue, likely an innocuous type/package loading failure in collections import
display.debug('has_plugin error: {0}'.format(to_text(ex)))
__contains__ = has_plugin
def _load_module_source(self, name, path):
# avoid collisions across plugins
if name.startswith('ansible_collections.'):
full_name = name
else:
full_name = '.'.join([self.package, name])
if full_name in sys.modules:
# Avoids double loading, See https://github.com/ansible/ansible/issues/13110
return sys.modules[full_name]
with warnings.catch_warnings():
# FIXME: this still has issues if the module was previously imported but not "cached",
# we should bypass this entire codepath for things that are directly importable
warnings.simplefilter("ignore", RuntimeWarning)
spec = importlib.util.spec_from_file_location(to_native(full_name), to_native(path))
module = importlib.util.module_from_spec(spec)
# mimic import machinery; make the module-being-loaded available in sys.modules during import
# and remove if there's a failure...
sys.modules[full_name] = module
try:
spec.loader.exec_module(module)
except Exception:
del sys.modules[full_name]
raise
return module
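    # A minimal standalone sketch of the spec-based import performed above,
    # assuming a hypothetical module name and file path; it mirrors what
    # _load_module_source does, including the sys.modules bookkeeping:
    #
    #   import importlib.util
    #   import sys
    #
    #   spec = importlib.util.spec_from_file_location('my_ns.my_plugin', '/tmp/my_plugin.py')
    #   module = importlib.util.module_from_spec(spec)
    #   sys.modules['my_ns.my_plugin'] = module
    #   try:
    #       spec.loader.exec_module(module)
    #   except Exception:
    #       del sys.modules['my_ns.my_plugin']
    #       raise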
def _update_object(self, obj, name, path, redirected_names=None, resolved=None):
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
setattr(obj, '_load_name', name)
setattr(obj, '_redirected_names', redirected_names or [])
names = []
if resolved:
names.append(resolved)
if redirected_names:
# reverse list so best name comes first
names.extend(redirected_names[::-1])
if not names:
raise AnsibleError(f"Missing FQCN for plugin source {name}")
setattr(obj, 'ansible_aliases', names)
setattr(obj, 'ansible_name', names[0])
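    # Illustrative sketch of the attributes set above (plugin name assumed
    # for the example; values depend on how the plugin was resolved):
    #
    #   plugin = lookup_loader.get('ansible.builtin.items')
    #   plugin.ansible_name      # 'ansible.builtin.items' - best resolved name
    #   plugin.ansible_aliases   # resolved name first, then any redirected names
    #   plugin._original_path    # filesystem path the plugin was loaded from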
def get(self, name, *args, **kwargs):
return self.get_with_context(name, *args, **kwargs).object
def get_with_context(self, name, *args, **kwargs):
""" instantiates a plugin of the given name using arguments """
found_in_cache = True
class_only = kwargs.pop('class_only', False)
collection_list = kwargs.pop('collection_list', None)
if name in self.aliases:
name = self.aliases[name]
if (cached_result := (self._plugin_instance_cache or {}).get(name)) and cached_result[1].resolved:
# Resolving the FQCN is slow, even if we've passed in the resolved FQCN.
# Short-circuit here if we've previously resolved this name.
# This will need to be restricted if non-vars plugins start using the cache, since
            # some non-FQCN plugins need to be resolved again with the collections list.
return get_with_context_result(*cached_result)
plugin_load_context = self.find_plugin_with_context(name, collection_list=collection_list)
if not plugin_load_context.resolved or not plugin_load_context.plugin_resolved_path:
# FIXME: this is probably an error (eg removed plugin)
return get_with_context_result(None, plugin_load_context)
fq_name = plugin_load_context.resolved_fqcn
if '.' not in fq_name and plugin_load_context.plugin_resolved_collection:
fq_name = '.'.join((plugin_load_context.plugin_resolved_collection, fq_name))
resolved_type_name = plugin_load_context.plugin_resolved_name
path = plugin_load_context.plugin_resolved_path
if (cached_result := (self._plugin_instance_cache or {}).get(fq_name)) and cached_result[1].resolved:
# This is unused by vars plugins, but it's here in case the instance cache expands to other plugin types.
# We get here if we've seen this plugin before, but it wasn't called with the resolved FQCN.
return get_with_context_result(*cached_result)
redirected_names = plugin_load_context.redirect_list or []
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source(resolved_type_name, path)
found_in_cache = False
self._load_config_defs(resolved_type_name, self._module_cache[path], path)
obj = getattr(self._module_cache[path], self.class_name)
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
return get_with_context_result(None, plugin_load_context)
if not issubclass(obj, plugin_class):
return get_with_context_result(None, plugin_load_context)
# FIXME: update this to use the load context
self._display_plugin_load(self.class_name, resolved_type_name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
# A plugin may need to use its _load_name in __init__ (for example, to set
# or get options from config), so update the object before using the constructor
instance = object.__new__(obj)
self._update_object(instance, resolved_type_name, path, redirected_names, fq_name)
obj.__init__(instance, *args, **kwargs) # pylint: disable=unnecessary-dunder-call
obj = instance
except TypeError as e:
if "abstract" in e.args[0]:
# Abstract Base Class or incomplete plugin, don't load
display.v('Returning not found on "%s" as it has unimplemented abstract methods; %s' % (resolved_type_name, to_native(e)))
return get_with_context_result(None, plugin_load_context)
raise
self._update_object(obj, resolved_type_name, path, redirected_names, fq_name)
if self._plugin_instance_cache is not None and getattr(obj, 'is_stateless', False):
self._plugin_instance_cache[fq_name] = (obj, plugin_load_context)
elif self._plugin_instance_cache is not None:
# The cache doubles as the load order, so record the FQCN even if the plugin hasn't set is_stateless = True
self._plugin_instance_cache[fq_name] = (None, PluginLoadContext())
return get_with_context_result(obj, plugin_load_context)
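    # Illustrative usage sketch (loader and plugin names are examples only);
    # class_only avoids instantiating plugins whose constructors need arguments:
    #
    #   result = connection_loader.get_with_context('ssh', class_only=True)
    #   result.object                             # the resolved Connection class, or None
    #   result.plugin_load_context.resolved_fqcn  # e.g. 'ansible.builtin.ssh'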
def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None):
""" formats data to display debug info for plugin loading, also avoids processing unless really needed """
if C.DEFAULT_DEBUG:
msg = 'Loading %s \'%s\' from %s' % (class_name, os.path.basename(name), path)
if len(searched_paths) > 1:
msg = '%s (searched paths: %s)' % (msg, self.format_paths(searched_paths))
if found_in_cache or class_only:
msg = '%s (found_in_cache=%s, class_only=%s)' % (msg, found_in_cache, class_only)
display.debug(msg)
def all(self, *args, **kwargs):
"""
Iterate through all plugins of this type, in configured paths (no collections)
A plugin loader is initialized with a specific type. This function is an iterator returning
all of the plugins of that type to the caller.
:kwarg path_only: If this is set to True, then we return the paths to where the plugins reside
instead of an instance of the plugin. This conflicts with class_only and both should
not be set.
:kwarg class_only: If this is set to True then we return the python class which implements
a plugin rather than an instance of the plugin. This conflicts with path_only and both
should not be set.
:kwarg _dedupe: By default, we only return one plugin per plugin name. Deduplication happens
in the same way as the :meth:`get` and :meth:`find_plugin` methods resolve which plugin
should take precedence. If this is set to False, then we return all of the plugins
found, including those with duplicate names. In the case of duplicates, the order in
which they are returned is the one that would take precedence first, followed by the
others in decreasing precedence order. This should only be used by subclasses which
want to manage their own deduplication of the plugins.
:*args: Any extra arguments are passed to each plugin when it is instantiated.
:**kwargs: Any extra keyword arguments are passed to each plugin when it is instantiated.
"""
# TODO: Change the signature of this method to:
# def all(return_type='instance', args=None, kwargs=None):
# if args is None: args = []
# if kwargs is None: kwargs = {}
# return_type can be instance, class, or path.
# These changes will mean that plugin parameters won't conflict with our params and
# will also make it impossible to request both a path and a class at the same time.
#
# Move _dedupe to be a class attribute, CUSTOM_DEDUPE, with subclasses for filters and
# tests setting it to True
dedupe = kwargs.pop('_dedupe', True)
path_only = kwargs.pop('path_only', False)
class_only = kwargs.pop('class_only', False)
# Having both path_only and class_only is a coding bug
if path_only and class_only:
raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
all_matches = []
found_in_cache = True
legacy_excluding_builtin = set()
for path_with_context in self._get_paths_with_context():
matches = glob.glob(to_native(os.path.join(path_with_context.path, "*.py")))
if not path_with_context.internal:
legacy_excluding_builtin.update(matches)
# we sort within each path, but keep path precedence from config
all_matches.extend(sorted(matches, key=os.path.basename))
loaded_modules = set()
for path in all_matches:
name = os.path.splitext(path)[0]
basename = os.path.basename(name)
is_j2 = isinstance(self, Jinja2Loader)
if is_j2:
ref_name = path
else:
ref_name = basename
if not is_j2 and basename in _PLUGIN_FILTERS[self.package]:
                # j2 plugins get processed in their own class; here they would just be container files
display.debug("'%s' skipped due to a defined plugin filter" % basename)
continue
if basename == '__init__' or (basename == 'base' and self.package == 'ansible.plugins.cache'):
                # the cache plugin package has a legacy 'base.py' file, which is a wrapper for __init__.py
display.debug("'%s' skipped due to reserved name" % name)
continue
if dedupe and ref_name in loaded_modules:
                # for j2 this is the same file; for other plugins it is the basename
display.debug("'%s' skipped as duplicate" % ref_name)
continue
loaded_modules.add(ref_name)
if path_only:
yield path
continue
if path in legacy_excluding_builtin:
fqcn = basename
else:
fqcn = f"ansible.builtin.{basename}"
if (cached_result := (self._plugin_instance_cache or {}).get(fqcn)) and cached_result[1].resolved:
# Here just in case, but we don't call all() multiple times for vars plugins, so this should not be used.
yield cached_result[0]
continue
if path not in self._module_cache:
if self.type in ('filter', 'test'):
# filter and test plugin files can contain multiple plugins
# they must have a unique python module name to prevent them from shadowing each other
full_name = '{0}_{1}'.format(abs(hash(path)), basename)
else:
full_name = basename
try:
module = self._load_module_source(full_name, path)
except Exception as e:
display.warning("Skipping plugin (%s), cannot load: %s" % (path, to_text(e)))
continue
self._module_cache[path] = module
found_in_cache = False
else:
module = self._module_cache[path]
self._load_config_defs(basename, module, path)
try:
obj = getattr(module, self.class_name)
except AttributeError as e:
display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
continue
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
continue
if not issubclass(obj, plugin_class):
continue
self._display_plugin_load(self.class_name, basename, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
obj = obj(*args, **kwargs)
except TypeError as e:
display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
self._update_object(obj, basename, path, resolved=fqcn)
if self._plugin_instance_cache is not None:
needs_enabled = False
if hasattr(obj, 'REQUIRES_ENABLED'):
needs_enabled = obj.REQUIRES_ENABLED
if not needs_enabled:
# Use get_with_context to cache the plugin the first time we see it.
self.get_with_context(fqcn)[0]
yield obj
class Jinja2Loader(PluginLoader):
"""
PluginLoader optimized for Jinja2 plugins
The filter and test plugins are Jinja2 plugins encapsulated inside of our plugin format.
We need to do a few things differently in the base class because of file == plugin
assumptions and dedupe logic.
"""
def __init__(self, class_name, package, config, subdir, plugin_wrapper_type, aliases=None, required_base_class=None):
super(Jinja2Loader, self).__init__(class_name, package, config, subdir, aliases=aliases, required_base_class=required_base_class)
self._plugin_wrapper_type = plugin_wrapper_type
self._cached_non_collection_wrappers = {}
def _clear_caches(self):
super(Jinja2Loader, self)._clear_caches()
self._cached_non_collection_wrappers = {}
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
raise NotImplementedError('find_plugin is not supported on Jinja2Loader')
@property
def method_map_name(self):
return get_plugin_class(self.class_name) + 's'
def get_contained_plugins(self, collection, plugin_path, name):
plugins = []
full_name = '.'.join(['ansible_collections', collection, 'plugins', self.type, name])
try:
# use 'parent' loader class to find files, but cannot return this as it can contain multiple plugins per file
if plugin_path not in self._module_cache:
self._module_cache[plugin_path] = self._load_module_source(full_name, plugin_path)
module = self._module_cache[plugin_path]
obj = getattr(module, self.class_name)
except Exception as e:
raise KeyError('Failed to load %s for %s: %s' % (plugin_path, collection, to_native(e)))
plugin_impl = obj()
if plugin_impl is None:
raise KeyError('Could not find %s.%s' % (collection, name))
try:
method_map = getattr(plugin_impl, self.method_map_name)
plugin_map = method_map().items()
except Exception as e:
display.warning("Ignoring %s plugins in '%s' as it seems to be invalid: %r" % (self.type, to_text(plugin_path), e))
return plugins
for func_name, func in plugin_map:
fq_name = '.'.join((collection, func_name))
full = '.'.join((full_name, func_name))
plugin = self._plugin_wrapper_type(func)
if plugin in plugins:
continue
self._update_object(plugin, full, plugin_path, resolved=fq_name)
plugins.append(plugin)
return plugins
# FUTURE: now that the resulting plugins are closer, refactor base class method with some extra
# hooks so we can avoid all the duplicated plugin metadata logic, and also cache the collection results properly here
def get_with_context(self, name, *args, **kwargs):
# pop N/A kwargs to avoid passthrough to parent methods
kwargs.pop('class_only', False)
kwargs.pop('collection_list', None)
context = PluginLoadContext()
# avoid collection path for legacy
name = name.removeprefix('ansible.legacy.')
self._ensure_non_collection_wrappers(*args, **kwargs)
# check for stuff loaded via legacy/builtin paths first
if known_plugin := self._cached_non_collection_wrappers.get(name):
context.resolved = True
context.plugin_resolved_name = name
context.plugin_resolved_path = known_plugin._original_path
context.plugin_resolved_collection = 'ansible.builtin' if known_plugin.ansible_name.startswith('ansible.builtin.') else ''
context._resolved_fqcn = known_plugin.ansible_name
return get_with_context_result(known_plugin, context)
plugin = None
key, leaf_key = get_fqcr_and_name(name)
seen = set()
# follow the meta!
while True:
if key in seen:
raise AnsibleError('recursive collection redirect found for %r' % name, 0)
seen.add(key)
acr = AnsibleCollectionRef.try_parse_fqcr(key, self.type)
if not acr:
raise KeyError('invalid plugin name: {0}'.format(key))
try:
ts = _get_collection_metadata(acr.collection)
except ValueError as e:
# no collection
raise KeyError('Invalid plugin FQCN ({0}): {1}'.format(key, to_native(e)))
# TODO: implement cycle detection (unified across collection redir as well)
routing_entry = ts.get('plugin_routing', {}).get(self.type, {}).get(leaf_key, {})
# check deprecations
deprecation_entry = routing_entry.get('deprecation')
if deprecation_entry:
warning_text = deprecation_entry.get('warning_text') or ''
removal_date = deprecation_entry.get('removal_date')
removal_version = deprecation_entry.get('removal_version')
warning_text = f'{self.type.title()} "{key}" has been deprecated.{" " if warning_text else ""}{warning_text}'
display.deprecated(warning_text, version=removal_version, date=removal_date, collection_name=acr.collection)
# check removal
tombstone_entry = routing_entry.get('tombstone')
if tombstone_entry:
warning_text = tombstone_entry.get('warning_text') or ''
removal_date = tombstone_entry.get('removal_date')
removal_version = tombstone_entry.get('removal_version')
warning_text = f'{self.type.title()} "{key}" has been removed.{" " if warning_text else ""}{warning_text}'
exc_msg = display.get_deprecation_message(warning_text, version=removal_version, date=removal_date,
collection_name=acr.collection, removed=True)
raise AnsiblePluginRemovedError(exc_msg)
# check redirects
redirect = routing_entry.get('redirect', None)
if redirect:
if not AnsibleCollectionRef.is_valid_fqcr(redirect):
raise AnsibleError(
f"Collection {acr.collection} contains invalid redirect for {acr.collection}.{acr.resource}: {redirect}. "
"Redirects must use fully qualified collection names."
)
next_key, leaf_key = get_fqcr_and_name(redirect, collection=acr.collection)
display.vvv('redirecting (type: {0}) {1}.{2} to {3}'.format(self.type, acr.collection, acr.resource, next_key))
key = next_key
else:
break
try:
pkg = import_module(acr.n_python_package_name)
except ImportError as e:
raise KeyError(to_native(e))
parent_prefix = acr.collection
if acr.subdirs:
parent_prefix = '{0}.{1}'.format(parent_prefix, acr.subdirs)
try:
for dummy, module_name, ispkg in pkgutil.iter_modules(pkg.__path__, prefix=parent_prefix + '.'):
if ispkg:
continue
try:
# use 'parent' loader class to find files, but cannot return this as it can contain
# multiple plugins per file
plugin_impl = super(Jinja2Loader, self).get_with_context(module_name, *args, **kwargs)
method_map = getattr(plugin_impl.object, self.method_map_name)
plugin_map = method_map().items()
except Exception as e:
display.warning(f"Skipping {self.type} plugins in {module_name}'; an error occurred while loading: {e}")
continue
for func_name, func in plugin_map:
fq_name = '.'.join((parent_prefix, func_name))
src_name = f"ansible_collections.{acr.collection}.plugins.{self.type}.{acr.subdirs}.{func_name}"
# TODO: load anyways into CACHE so we only match each at end of loop
# the files themselves should already be cached by base class caching of modules(python)
if key in (func_name, fq_name):
plugin = self._plugin_wrapper_type(func)
if plugin:
context = plugin_impl.plugin_load_context
self._update_object(plugin, src_name, plugin_impl.object._original_path, resolved=fq_name)
# context will have filename, which for tests/filters might not be correct
context._resolved_fqcn = plugin.ansible_name
# FIXME: once we start caching these results, we'll be missing functions that would have loaded later
                            break  # go to next file as a later file can override on duplicates (don't break both loops)
except AnsiblePluginRemovedError as apre:
raise AnsibleError(to_native(apre), 0, orig_exc=apre)
except (AnsibleError, KeyError):
raise
except Exception as ex:
display.warning('An unexpected error occurred during Jinja2 plugin loading: {0}'.format(to_native(ex)))
display.vvv('Unexpected error during Jinja2 plugin loading: {0}'.format(format_exc()))
raise AnsibleError(to_native(ex), 0, orig_exc=ex)
return get_with_context_result(plugin, context)
def all(self, *args, **kwargs):
kwargs.pop('_dedupe', None)
path_only = kwargs.pop('path_only', False)
class_only = kwargs.pop('class_only', False) # basically ignored for test/filters since they are functions
# Having both path_only and class_only is a coding bug
if path_only and class_only:
raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
self._ensure_non_collection_wrappers(*args, **kwargs)
if path_only:
yield from (w._original_path for w in self._cached_non_collection_wrappers.values())
else:
yield from (w for w in self._cached_non_collection_wrappers.values())
def _ensure_non_collection_wrappers(self, *args, **kwargs):
if self._cached_non_collection_wrappers:
return
# get plugins from files in configured paths (multiple in each)
for p_map in super(Jinja2Loader, self).all(*args, **kwargs):
is_builtin = p_map.ansible_name.startswith('ansible.builtin.')
# p_map is really object from file with class that holds multiple plugins
plugins_list = getattr(p_map, self.method_map_name)
try:
plugins = plugins_list()
except Exception as e:
display.vvvv("Skipping %s plugins in '%s' as it seems to be invalid: %r" % (self.type, to_text(p_map._original_path), e))
continue
for plugin_name in plugins.keys():
if '.' in plugin_name:
display.debug(f'{plugin_name} skipped in {p_map._original_path}; Jinja plugin short names may not contain "."')
continue
if plugin_name in _PLUGIN_FILTERS[self.package]:
display.debug("%s skipped due to a defined plugin filter" % plugin_name)
continue
# the plugin class returned by the loader may host multiple Jinja plugins, but we wrap each plugin in
# its own surrogate wrapper instance here to ease the bookkeeping...
wrapper = self._plugin_wrapper_type(plugins[plugin_name])
fqcn = plugin_name
collection = '.'.join(p_map.ansible_name.split('.')[:2]) if p_map.ansible_name.count('.') >= 2 else ''
if not plugin_name.startswith(collection):
fqcn = f"{collection}.{plugin_name}"
self._update_object(wrapper, plugin_name, p_map._original_path, resolved=fqcn)
target_names = {plugin_name, fqcn}
if is_builtin:
target_names.add(f'ansible.builtin.{plugin_name}')
for target_name in target_names:
if existing_plugin := self._cached_non_collection_wrappers.get(target_name):
display.debug(f'Jinja plugin {target_name} from {p_map._original_path} skipped; '
                                      f'shadowed by plugin from {existing_plugin._original_path}')
continue
self._cached_non_collection_wrappers[target_name] = wrapper
def get_fqcr_and_name(resource, collection='ansible.builtin'):
if '.' not in resource:
name = resource
fqcr = collection + '.' + resource
else:
name = resource.split('.')[-1]
fqcr = resource
return fqcr, name
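# Illustrative results of the helper above (inputs are examples; the default
# collection is 'ansible.builtin'):
#
#   get_fqcr_and_name('flatten')
#       -> ('ansible.builtin.flatten', 'flatten')
#   get_fqcr_and_name('community.general.dict2items')
#       -> ('community.general.dict2items', 'dict2items')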
def _load_plugin_filter():
filters = _PLUGIN_FILTERS
user_set = False
if C.PLUGIN_FILTERS_CFG is None:
filter_cfg = '/etc/ansible/plugin_filters.yml'
else:
filter_cfg = C.PLUGIN_FILTERS_CFG
user_set = True
if os.path.exists(filter_cfg):
with open(filter_cfg, 'rb') as f:
try:
filter_data = from_yaml(f.read())
except Exception as e:
                display.warning(u'The plugin filter file, {0}, was not parsable.'
                                u' Skipping: {1}'.format(filter_cfg, to_text(e)))
return filters
try:
version = filter_data['filter_version']
except KeyError:
            display.warning(u'The plugin filter file, {0}, was invalid.'
                            u' Skipping.'.format(filter_cfg))
return filters
# Try to convert for people specifying version as a float instead of string
version = to_text(version)
version = version.strip()
# Modules and action plugins share the same reject list since the difference between the
# two isn't visible to the users
if version == u'1.0':
try:
filters['ansible.modules'] = frozenset(filter_data['module_rejectlist'])
except TypeError:
display.warning(u'Unable to parse the plugin filter file {0} as'
u' module_rejectlist is not a list.'
u' Skipping.'.format(filter_cfg))
return filters
filters['ansible.plugins.action'] = filters['ansible.modules']
else:
            display.warning(u'The plugin filter file, {0}, specifies a version not recognized by this'
                            u' version of Ansible. Skipping.'.format(filter_cfg))
else:
if user_set:
            display.warning(u'The plugin filter file, {0}, does not exist.'
                            u' Skipping.'.format(filter_cfg))
# Special case: the stat module as Ansible can run very few things if stat is rejected
if 'stat' in filters['ansible.modules']:
raise AnsibleError('The stat module was specified in the module reject list file, {0}, but'
' Ansible will not function without the stat module. Please remove stat'
' from the reject list.'.format(to_native(filter_cfg)))
return filters
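# An illustrative plugin filter file of the version-1.0 format parsed above;
# the path and rejected module names are examples only:
#
#   # /etc/ansible/plugin_filters.yml
#   filter_version: '1.0'
#   module_rejectlist:
#     - uri
#     - slurp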
# since we don't want the actual collection loader understanding metadata, we'll do it in an event handler
def _on_collection_load_handler(collection_name, collection_path):
display.vvvv(to_text('Loading collection {0} from {1}'.format(collection_name, collection_path)))
collection_meta = _get_collection_metadata(collection_name)
try:
if not _does_collection_support_ansible_version(collection_meta.get('requires_ansible', ''), ansible_version):
mismatch_behavior = C.config.get_config_value('COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH')
message = 'Collection {0} does not support Ansible version {1}'.format(collection_name, ansible_version)
if mismatch_behavior == 'warning':
display.warning(message)
elif mismatch_behavior == 'error':
raise AnsibleCollectionUnsupportedVersionError(message)
except AnsibleError:
raise
except Exception as ex:
display.warning('Error parsing collection metadata requires_ansible value from collection {0}: {1}'.format(collection_name, ex))
def _does_collection_support_ansible_version(requirement_string, ansible_version):
if not requirement_string:
return True
if not SpecifierSet:
display.warning('packaging Python module unavailable; unable to validate collection Ansible version requirements')
return True
ss = SpecifierSet(requirement_string)
# ignore prerelease/postrelease/beta/dev flags for simplicity
base_ansible_version = Version(ansible_version).base_version
return ss.contains(base_ansible_version)
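# Illustrative results for the check above (inputs are examples; the base
# version strips pre/post-release flags before matching):
#
#   _does_collection_support_ansible_version('>=2.9.10,<2.12', '2.11.0')  # True
#   _does_collection_support_ansible_version('>=2.14', '2.11.0')          # False
#   _does_collection_support_ansible_version('', '2.11.0')                # True (no requirement)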
def _configure_collection_loader(prefix_collections_path=None):
if AnsibleCollectionConfig.collection_finder:
# this must be a Python warning so that it can be filtered out by the import sanity test
warnings.warn('AnsibleCollectionFinder has already been configured')
return
if prefix_collections_path is None:
prefix_collections_path = []
paths = list(prefix_collections_path) + C.COLLECTIONS_PATHS
finder = _AnsibleCollectionFinder(paths, C.COLLECTIONS_SCAN_SYS_PATH)
finder._install()
# this should succeed now
AnsibleCollectionConfig.on_collection_load += _on_collection_load_handler
def init_plugin_loader(prefix_collections_path=None):
"""Initialize the plugin filters and the collection loaders
This method must be called to configure and insert the collection python loaders
into ``sys.meta_path`` and ``sys.path_hooks``.
This method is only called in ``CLI.run`` after CLI args have been parsed, so that
instantiation of the collection finder can utilize parsed CLI args, and to not cause
side effects.
"""
_load_plugin_filter()
_configure_collection_loader(prefix_collections_path)
# TODO: Evaluate making these class instantiations lazy, but keep them in the global scope
# doc fragments first
fragment_loader = PluginLoader(
'ModuleDocFragment',
'ansible.plugins.doc_fragments',
C.DOC_FRAGMENT_PLUGIN_PATH,
'doc_fragments',
)
action_loader = PluginLoader(
'ActionModule',
'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins',
required_base_class='ActionBase',
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins',
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins',
)
connection_loader = PluginLoader(
'Connection',
'ansible.plugins.connection',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'},
required_base_class='ConnectionBase',
)
shell_loader = PluginLoader(
'ShellModule',
'ansible.plugins.shell',
'shell_plugins',
'shell_plugins',
)
module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
'library',
)
module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
# NB: dedicated loader is currently necessary because PS module_utils expects "with subdir" lookup where
# regular module_utils doesn't. This can be revisited once we have more granular loaders.
ps_module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins',
required_base_class='LookupBase',
)
filter_loader = Jinja2Loader(
'FilterModule',
'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins',
AnsibleJinja2Filter
)
test_loader = Jinja2Loader(
'TestModule',
'ansible.plugins.test',
C.DEFAULT_TEST_PLUGIN_PATH,
'test_plugins',
AnsibleJinja2Test
)
strategy_loader = PluginLoader(
'StrategyModule',
'ansible.plugins.strategy',
C.DEFAULT_STRATEGY_PLUGIN_PATH,
'strategy_plugins',
required_base_class='StrategyBase',
)
terminal_loader = PluginLoader(
'TerminalModule',
'ansible.plugins.terminal',
C.DEFAULT_TERMINAL_PLUGIN_PATH,
'terminal_plugins',
required_base_class='TerminalBase'
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins',
)
cliconf_loader = PluginLoader(
'Cliconf',
'ansible.plugins.cliconf',
C.DEFAULT_CLICONF_PLUGIN_PATH,
'cliconf_plugins',
required_base_class='CliconfBase'
)
netconf_loader = PluginLoader(
'Netconf',
'ansible.plugins.netconf',
C.DEFAULT_NETCONF_PLUGIN_PATH,
'netconf_plugins',
required_base_class='NetconfBase'
)
inventory_loader = PluginLoader(
'InventoryModule',
'ansible.plugins.inventory',
C.DEFAULT_INVENTORY_PLUGIN_PATH,
'inventory_plugins'
)
httpapi_loader = PluginLoader(
'HttpApi',
'ansible.plugins.httpapi',
C.DEFAULT_HTTPAPI_PLUGIN_PATH,
'httpapi_plugins',
required_base_class='HttpApiBase',
)
become_loader = PluginLoader(
'BecomeModule',
'ansible.plugins.become',
C.BECOME_PLUGIN_PATH,
'become_plugins'
)
| 74,433 | Python | .py | 1,341 | 43.832215 | 159 | 0.629801 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,350 | __init__.py | ansible_ansible/lib/ansible/plugins/__init__.py |
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from abc import ABC
import types
import typing as t
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.utils.display import Display
display = Display()
if t.TYPE_CHECKING:
from .loader import PluginPathContext
# Global so that all instances of a PluginLoader will share the caches
MODULE_CACHE = {} # type: dict[str, dict[str, types.ModuleType]]
PATH_CACHE = {} # type: dict[str, list[PluginPathContext] | None]
PLUGIN_PATH_CACHE = {} # type: dict[str, dict[str, dict[str, PluginPathContext]]]
def get_plugin_class(obj):
if isinstance(obj, string_types):
return obj.lower().replace('module', '')
else:
return obj.__class__.__name__.lower().replace('module', '')
class AnsiblePlugin(ABC):
# Set by plugin loader
_load_name: str
# allow extra passthrough parameters
allow_extras: bool = False
_extras_prefix: str | None = None
def __init__(self):
self._options = {}
self._defs = None
@property
def extras_prefix(self):
if not self._extras_prefix:
self._extras_prefix = self._load_name.split('.')[-1]
return self._extras_prefix
def matches_name(self, possible_names):
possible_fqcns = set()
for name in possible_names:
if '.' not in name:
possible_fqcns.add(f"ansible.builtin.{name}")
elif name.startswith("ansible.legacy."):
possible_fqcns.add(name.removeprefix("ansible.legacy."))
possible_fqcns.add(name)
return bool(possible_fqcns.intersection(set(self.ansible_aliases)))
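    # Illustrative sketch (names are examples only): a plugin whose
    # ansible_aliases include 'ansible.builtin.ssh' and 'ssh' would match
    # any of these spellings:
    #
    #   plugin.matches_name(['ssh'])                 # short name also tried as ansible.builtin.ssh
    #   plugin.matches_name(['ansible.legacy.ssh'])  # legacy prefix is stripped and kept
    #   plugin.matches_name(['ansible.builtin.ssh'])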
def get_option_and_origin(self, option, hostvars=None):
try:
option_value, origin = C.config.get_config_value_and_origin(option, plugin_type=self.plugin_type, plugin_name=self._load_name, variables=hostvars)
except AnsibleError as e:
raise KeyError(to_native(e))
return option_value, origin
def get_option(self, option, hostvars=None):
if option not in self._options:
option_value, dummy = self.get_option_and_origin(option, hostvars=hostvars)
self.set_option(option, option_value)
return self._options.get(option)
def get_options(self, hostvars=None):
options = {}
for option in self.option_definitions.keys():
options[option] = self.get_option(option, hostvars=hostvars)
return options
def set_option(self, option, value):
self._options[option] = C.config.get_config_value(option, plugin_type=self.plugin_type, plugin_name=self._load_name, direct={option: value})
C.handle_config_noise(display)
def set_options(self, task_keys=None, var_options=None, direct=None):
"""
Sets the _options attribute with the configuration/keyword information for this plugin
:arg task_keys: Dict with playbook keywords that affect this option
:arg var_options: Dict with either 'connection variables'
:arg direct: Dict with 'direct assignment'
"""
self._options = C.config.get_plugin_options(self.plugin_type, self._load_name, keys=task_keys, variables=var_options, direct=direct)
# allow extras/wildcards from vars that are not directly consumed in configuration
# this is needed to support things like winrm that can have extended protocol options we don't directly handle
if self.allow_extras and var_options and '_extras' in var_options:
# these are largely unvalidated passthroughs, either plugin or underlying API will validate
self._options['_extras'] = var_options['_extras']
C.handle_config_noise(display)
def has_option(self, option):
if not self._options:
self.set_options()
return option in self._options
@property
def plugin_type(self):
return self.__class__.__name__.lower().replace('module', '')
@property
def option_definitions(self):
if self._defs is None:
self._defs = C.config.get_configuration_definitions(plugin_type=self.plugin_type, name=self._load_name)
return self._defs
def _check_required(self):
# FIXME: standardize required check based on config
pass
class AnsibleJinja2Plugin(AnsiblePlugin):
def __init__(self, function):
super(AnsibleJinja2Plugin, self).__init__()
self._function = function
@property
def plugin_type(self):
return self.__class__.__name__.lower().replace('ansiblejinja2', '')
def _no_options(self, *args, **kwargs):
raise NotImplementedError()
has_option = get_option = get_options = option_definitions = set_option = set_options = _no_options
@property
def j2_function(self):
return self._function
| 5,803 | Python | .py | 123 | 40.650407 | 158 | 0.689936 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,351 | list.py | ansible_ansible/lib/ansible/plugins/list.py |
# (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
from ansible import context
from ansible import constants as C
from ansible.collections.list import list_collections
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native, to_bytes
from ansible.plugins import loader
from ansible.utils.display import Display
from ansible.utils.collection_loader._collection_finder import _get_collection_path
display = Display()
# not real plugins
IGNORE = {
# ptype: names
'module': ('async_wrapper', ),
'cache': ('base', ),
}
def get_composite_name(collection, name, path, depth):
resolved_collection = collection
if '.' not in name:
resource_name = name
else:
if collection == 'ansible.legacy' and name.startswith('ansible.builtin.'):
resolved_collection = 'ansible.builtin'
resource_name = '.'.join(name.split(f"{resolved_collection}.")[1:])
# create FQCN
composite = [resolved_collection]
if depth:
composite.extend(path.split(os.path.sep)[depth * -1:])
composite.append(to_native(resource_name))
return '.'.join(composite)
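# Illustrative results of the composite-name helper (inputs are examples):
#
#   get_composite_name('community.general', 'dict2items', '/path/plugins/filter', 0)
#       -> 'community.general.dict2items'
#   get_composite_name('ansible.legacy', 'ansible.builtin.ping', '/path', 0)
#       -> 'ansible.builtin.ping'  # legacy names resolving to builtin are normalized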
def _list_plugins_from_paths(ptype, dirs, collection, depth=0):
# TODO: update to use importlib.resources
plugins = {}
for path in dirs:
display.debug("Searching '{0}'s '{1}' for {2} plugins".format(collection, path, ptype))
b_path = to_bytes(path)
if os.path.basename(b_path).startswith((b'.', b'__')):
# skip hidden/special dirs
continue
if os.path.exists(b_path):
if os.path.isdir(b_path):
bkey = ptype.lower()
for plugin_file in os.listdir(b_path):
if plugin_file.startswith((b'.', b'__')):
# hidden or python internal file/dir
continue
display.debug("Found possible plugin: '{0}'".format(plugin_file))
b_plugin, b_ext = os.path.splitext(plugin_file)
plugin = to_native(b_plugin)
full_path = os.path.join(b_path, plugin_file)
if os.path.isdir(full_path):
                        # it's a dir, recurse
if collection in C.SYNTHETIC_COLLECTIONS:
if not os.path.exists(os.path.join(full_path, b'__init__.py')):
                                # don't recurse for synthetic collections unless __init__.py is present
continue
# actually recurse dirs
plugins.update(_list_plugins_from_paths(ptype, [to_native(full_path)], collection, depth=depth + 1))
else:
if any([
plugin in C.IGNORE_FILES, # general files to ignore
to_native(b_ext) in C.REJECT_EXTS, # general extensions to ignore
b_ext in (b'.yml', b'.yaml', b'.json'), # ignore docs files TODO: constant!
plugin in IGNORE.get(bkey, ()), # plugin in reject list
os.path.islink(full_path), # skip aliases, author should document in 'aliases' field
]):
continue
if ptype in ('test', 'filter'):
try:
file_plugins = _list_j2_plugins_from_file(collection, full_path, ptype, plugin)
except KeyError as e:
display.warning('Skipping file %s: %s' % (full_path, to_native(e)))
continue
for plugin in file_plugins:
plugin_name = get_composite_name(collection, plugin.ansible_name, os.path.dirname(to_native(full_path)), depth)
plugins[plugin_name] = full_path
else:
plugin_name = get_composite_name(collection, plugin, os.path.dirname(to_native(full_path)), depth)
plugins[plugin_name] = full_path
else:
display.debug("Skip listing plugins in '{0}' as it is not a directory".format(path))
else:
display.debug("Skip listing plugins in '{0}' as it does not exist".format(path))
return plugins
def _list_j2_plugins_from_file(collection, plugin_path, ptype, plugin_name):
ploader = getattr(loader, '{0}_loader'.format(ptype))
file_plugins = ploader.get_contained_plugins(collection, plugin_path, plugin_name)
return file_plugins
def list_collection_plugins(ptype, collections, search_paths=None):
# TODO: update to use importlib.resources
# starts at {plugin_name: filepath, ...}, but changes at the end
plugins = {}
try:
ploader = getattr(loader, '{0}_loader'.format(ptype))
except AttributeError:
raise AnsibleError('Cannot list plugins, incorrect plugin type supplied: {0}'.format(ptype))
# get plugins for each collection
for collection in collections.keys():
if collection == 'ansible.builtin':
# dirs from ansible install, but not configured paths
dirs = [d.path for d in ploader._get_paths_with_context() if d.internal]
elif collection == 'ansible.legacy':
# configured paths + search paths (should include basedirs/-M)
dirs = [d.path for d in ploader._get_paths_with_context() if not d.internal]
if context.CLIARGS.get('module_path', None):
dirs.extend(context.CLIARGS['module_path'])
else:
            # search path in this case is for locating the collection itself
b_ptype = to_bytes(C.COLLECTION_PTYPE_COMPAT.get(ptype, ptype))
dirs = [to_native(os.path.join(collections[collection], b'plugins', b_ptype))]
# acr = AnsibleCollectionRef.try_parse_fqcr(collection, ptype)
# if acr:
# dirs = acr.subdirs
# else:
# raise Exception('bad acr for %s, %s' % (collection, ptype))
plugins.update(_list_plugins_from_paths(ptype, dirs, collection))
    # return plugin and its class object, None for those not verifiable or failing
if ptype in ('module',):
# no 'invalid' tests for modules
for plugin in plugins.keys():
plugins[plugin] = (plugins[plugin], None)
else:
# detect invalid plugin candidates AND add loaded object to return data
for plugin in list(plugins.keys()):
pobj = None
try:
pobj = ploader.get(plugin, class_only=True)
except Exception as e:
display.vvv("The '{0}' {1} plugin could not be loaded from '{2}': {3}".format(plugin, ptype, plugins[plugin], to_native(e)))
# sets final {plugin_name: (filepath, class|NONE if not loaded), ...}
plugins[plugin] = (plugins[plugin], pobj)
# {plugin_name: (filepath, class), ...}
return plugins
def list_plugins(ptype, collections=None, search_paths=None):
if isinstance(collections, str):
collections = [collections]
# {plugin_name: (filepath, class), ...}
plugins = {}
plugin_collections = {}
if collections is None:
# list all collections, add synthetic ones
plugin_collections['ansible.builtin'] = b''
plugin_collections['ansible.legacy'] = b''
plugin_collections.update(list_collections(search_paths=search_paths, dedupe=True))
else:
for collection in collections:
if collection == 'ansible.legacy':
# add builtin, since legacy also resolves to these
plugin_collections[collection] = b''
plugin_collections['ansible.builtin'] = b''
else:
try:
plugin_collections[collection] = to_bytes(_get_collection_path(collection))
except ValueError as e:
raise AnsibleError("Cannot use supplied collection {0}: {1}".format(collection, to_native(e)), orig_exc=e)
if plugin_collections:
plugins.update(list_collection_plugins(ptype, plugin_collections))
return plugins
# wrappers
def list_plugin_names(ptype, collection=None):
return [plugin.ansible_name for plugin in list_plugins(ptype, collection)]
def list_plugin_files(ptype, collection=None):
plugins = list_plugins(ptype, collection)
return [plugins[k][0] for k in plugins.keys()]
def list_plugin_classes(ptype, collection=None):
plugins = list_plugins(ptype, collection)
return [plugins[k][1] for k in plugins.keys()]
| 8,920 | Python | .py | 170 | 40.147059 | 143 | 0.593107 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,352 | auto.py | ansible_ansible/lib/ansible/plugins/inventory/auto.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: auto
author:
- Matt Davis (@nitzmahone)
version_added: "2.5"
short_description: Loads and executes an inventory plugin specified in a YAML config
description:
- By enabling the C(auto) inventory plugin, any YAML inventory config file with a
C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
config. This effectively provides automatic enabling of all installed/accessible inventory plugins.
- To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
"""
EXAMPLES = """
# This plugin is not intended for direct use; it is a fallback mechanism for automatic enabling of
# all installed inventory plugins.
"""
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.plugins.loader import inventory_loader
class InventoryModule(BaseInventoryPlugin):
NAME = 'auto'
def verify_file(self, path):
if not path.endswith('.yml') and not path.endswith('.yaml'):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=True):
config_data = loader.load_from_file(path, cache='none')
try:
plugin_name = config_data.get('plugin', None)
except AttributeError:
plugin_name = None
if not plugin_name:
raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))
plugin = inventory_loader.get(plugin_name)
if not plugin:
raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))
if not plugin.verify_file(path):
raise AnsibleParserError("inventory source '{0}' could not be verified by inventory plugin '{1}'".format(path, plugin_name))
self.display.v("Using inventory plugin '{0}' to process inventory source '{1}'".format(plugin._load_name, path))
plugin.parse(inventory, loader, path, cache=cache)
try:
plugin.update_cache_if_changed()
except AttributeError:
pass
| 2,443 | Python | .py | 47 | 44.829787 | 136 | 0.700546 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,353 | generator.py | ansible_ansible/lib/ansible/plugins/inventory/generator.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: generator
version_added: "2.6"
short_description: Uses Jinja2 to construct hosts and groups from patterns
description:
- Uses a YAML configuration file with a valid YAML or C(.config) extension to define var expressions and group conditionals
- Create a template pattern that describes each host, and then use independent configuration layers
- Every element of every layer is combined to create a host for every layer combination
- Parent groups can be defined with reference to hosts and other groups using the same template variables
options:
plugin:
description: token that ensures this is a source file for the 'generator' plugin.
required: True
choices: ['ansible.builtin.generator', 'generator']
hosts:
description:
- The C(name) key is a template used to generate
hostnames based on the C(layers) option. Each variable in the name is expanded to create a
cartesian product of all possible layer combinations.
- The C(parents) are a list of parent groups that the host belongs to. Each C(parent) item
contains a C(name) key, again expanded from the template, and an optional C(parents) key
that lists its parents.
- Parents can also contain C(vars), which is a dictionary of vars that
              is then always set for that variable. This can provide easy access to the group name, e.g.
set an C(application) variable that is set to the value of the C(application) layer name.
layers:
description:
- A dictionary of layers, with the key being the layer name, used as a variable name in the C(host)
C(name) and C(parents) keys. Each layer value is a list of possible values for that layer.
"""
EXAMPLES = """
# inventory.config file in YAML format
# remember to enable this inventory plugin in the ansible.cfg before using
# View the output using `ansible-inventory -i inventory.config --list`
plugin: ansible.builtin.generator
hosts:
name: "{{ operation }}_{{ application }}_{{ environment }}_runner"
parents:
- name: "{{ operation }}_{{ application }}_{{ environment }}"
parents:
- name: "{{ operation }}_{{ application }}"
parents:
- name: "{{ operation }}"
- name: "{{ application }}"
- name: "{{ application }}_{{ environment }}"
parents:
- name: "{{ application }}"
vars:
application: "{{ application }}"
- name: "{{ environment }}"
vars:
environment: "{{ environment }}"
- name: runner
layers:
operation:
- build
- launch
environment:
- dev
- test
- prod
application:
- web
- api
"""
import os
from itertools import product
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
""" constructs groups and vars using Jinja2 template expressions """
NAME = 'generator'
def __init__(self):
super(InventoryModule, self).__init__()
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
valid = True
return valid
def template(self, pattern, variables):
self.templar.available_variables = variables
return self.templar.do_template(pattern)
def add_parents(self, inventory, child, parents, template_vars):
for parent in parents:
try:
groupname = self.template(parent['name'], template_vars)
except (AttributeError, ValueError):
raise AnsibleParserError("Element %s has a parent with no name element" % child['name'])
if groupname not in inventory.groups:
inventory.add_group(groupname)
group = inventory.groups[groupname]
for (k, v) in parent.get('vars', {}).items():
group.set_variable(k, self.template(v, template_vars))
inventory.add_child(groupname, child)
self.add_parents(inventory, groupname, parent.get('parents', []), template_vars)
def parse(self, inventory, loader, path, cache=False):
""" parses the inventory file """
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
config = self._read_config_data(path)
template_inputs = product(*config['layers'].values())
for item in template_inputs:
template_vars = dict()
for i, key in enumerate(config['layers'].keys()):
template_vars[key] = item[i]
host = self.template(config['hosts']['name'], template_vars)
inventory.add_host(host)
self.add_parents(inventory, host, config['hosts'].get('parents', []), template_vars)
| 5,495 | Python | .py | 113 | 38.40708 | 131 | 0.623205 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,354 | advanced_host_list.py | ansible_ansible/lib/ansible/plugins/inventory/advanced_host_list.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: advanced_host_list
version_added: "2.4"
short_description: Parses a 'host list' with ranges
description:
- Parses a host list string as a comma separated values of hosts and supports host ranges.
- This plugin only applies to inventory sources that are not paths and contain at least one comma.
"""
EXAMPLES = """
# simple range
# ansible -i 'host[1:10],' -m ping
# still supports w/o ranges also
# ansible-playbook -i 'localhost,' play.yml
"""
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
NAME = 'advanced_host_list'
def verify_file(self, host_list):
valid = False
b_path = to_bytes(host_list, errors='surrogate_or_strict')
if not os.path.exists(b_path) and ',' in host_list:
valid = True
return valid
def parse(self, inventory, loader, host_list, cache=True):
""" parses the inventory file """
super(InventoryModule, self).parse(inventory, loader, host_list)
try:
for h in host_list.split(','):
h = h.strip()
if h:
try:
(hostnames, port) = self._expand_hostpattern(h)
except AnsibleError as e:
self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
hostnames = [h]
port = None
for host in hostnames:
if host not in self.inventory.hosts:
self.inventory.add_host(host, group='ungrouped', port=port)
except Exception as e:
raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
| 2,168 | Python | .py | 47 | 36.297872 | 117 | 0.625831 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,355 | __init__.py | ansible_ansible/lib/ansible/plugins/inventory/__init__.py |
# (c) 2017, Red Hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
import hashlib
import os
import string
from collections.abc import Mapping
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.inventory.group import to_safe_group_name as original_safe
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import AnsiblePlugin
from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars, load_extra_vars
display = Display()
# Helper methods
def to_safe_group_name(name):
# placeholder for backwards compat
return original_safe(name, force=True, silent=True)
def detect_range(line=None):
"""
    A helper function that checks a given host line to see if it contains
    a bracketed range pattern such as C(host[1:20]).
Returns True if the given line contains a pattern, else False.
"""
return '[' in line
def expand_hostname_range(line=None):
"""
    A helper function that expands a given line that contains a bracketed
    range pattern, and returns a list that consists of the expanded
    version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#hosts-and-groups
"""
all_hosts = []
if line:
        # A hostname such as db[1:6]-node is considered to consist of
        # three parts:
# head: 'db'
# nrange: [1:6]; range() is a built-in. Can't use the name
# tail: '-node'
# Add support for multiple ranges in a host so:
# db[01:10:3]node-[01:10]
# - to do this we split off at the first [...] set, getting the list
# of hosts and then repeat until none left.
# - also add an optional third parameter which contains the step. (Default: 1)
# so range can be [01:10:2] -> 01 03 05 07 09
(head, nrange, tail) = line.replace('[', '|', 1).replace(']', '|', 1).split('|')
bounds = nrange.split(":")
if len(bounds) != 2 and len(bounds) != 3:
raise AnsibleError("host range must be begin:end or begin:end:step")
beg = bounds[0]
end = bounds[1]
if len(bounds) == 2:
step = 1
else:
step = bounds[2]
if not beg:
beg = "0"
if not end:
raise AnsibleError("host range must specify end value")
if beg[0] == '0' and len(beg) > 1:
rlen = len(beg) # range length formatting hint
if rlen != len(end):
raise AnsibleError("host range must specify equal-length begin and end formats")
def fill(x):
return str(x).zfill(rlen) # range sequence
else:
fill = str
try:
i_beg = string.ascii_letters.index(beg)
i_end = string.ascii_letters.index(end)
if i_beg > i_end:
raise AnsibleError("host range must have begin <= end")
seq = list(string.ascii_letters[i_beg:i_end + 1:int(step)])
except ValueError: # not an alpha range
seq = range(int(beg), int(end) + 1, int(step))
for rseq in seq:
hname = ''.join((head, fill(rseq), tail))
if detect_range(hname):
all_hosts.extend(expand_hostname_range(hname))
else:
all_hosts.append(hname)
return all_hosts
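# Illustrative expansions of the range syntax handled above (computed per the
# rules in the comments):
#
#   expand_hostname_range('db[1:3]-node')   # ['db1-node', 'db2-node', 'db3-node']
#   expand_hostname_range('web[01:05:2]')   # ['web01', 'web03', 'web05']
#   expand_hostname_range('host[a:c]')      # ['hosta', 'hostb', 'hostc']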
def get_cache_plugin(plugin_name, **kwargs):
try:
cache = CacheObject(plugin_name, **kwargs)
except AnsibleError as e:
if 'fact_caching_connection' in to_native(e):
raise AnsibleError("error, '%s' inventory cache plugin requires the one of the following to be set "
"to a writeable directory path:\nansible.cfg:\n[default]: fact_caching_connection,\n"
"[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
"ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
else:
raise e
if plugin_name != 'memory' and kwargs and not getattr(cache._plugin, '_options', None):
raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
'from ansible.constants.'.format(plugin_name))
return cache
class BaseInventoryPlugin(AnsiblePlugin):
""" Parses an Inventory Source"""
TYPE = 'generator'
# 3rd party plugins redefine this to
# use custom group name sanitization
# since constructed features enforce
# it by default.
_sanitize_group_name = staticmethod(to_safe_group_name)
def __init__(self):
super(BaseInventoryPlugin, self).__init__()
self._options = {}
self.inventory = None
self.display = display
self._vars = {}
def parse(self, inventory, loader, path, cache=True):
""" Populates inventory from the given data. Raises an error on any parse failure
:arg inventory: a copy of the previously accumulated inventory data,
to be updated with any new data this plugin provides.
The inventory can be empty if no other source/plugin ran successfully.
:arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
it also has Vault support to automatically decrypt files.
:arg path: the string that represents the 'inventory source',
normally a path to a configuration file for this inventory,
but it can also be a raw string for this plugin to consume
:arg cache: a boolean that indicates if the plugin should use the cache or not
you can ignore if this plugin does not implement caching.
"""
self.loader = loader
self.inventory = inventory
self.templar = Templar(loader=loader)
self._vars = load_extra_vars(loader)
def verify_file(self, path):
""" Verify if file is usable by this plugin, base does minimal accessibility check
:arg path: a string that was passed as an inventory source,
it normally is a path to a config file, but this is not a requirement,
it can also be parsed itself as the inventory data to process.
So only call this base class if you expect it to be a file.
"""
valid = False
b_path = to_bytes(path, errors='surrogate_or_strict')
if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
valid = True
else:
self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
return valid
def _populate_host_vars(self, hosts, variables, group=None, port=None):
if not isinstance(variables, Mapping):
raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
for host in hosts:
self.inventory.add_host(host, group=group, port=port)
for k in variables:
self.inventory.set_variable(host, k, variables[k])
def _read_config_data(self, path):
""" validate config and set options as appropriate
:arg path: path to common yaml format config file for this plugin
"""
config = {}
try:
# avoid loader cache so meta: refresh_inventory can pick up config changes
# if we read more than once, fs cache should be good enough
config = self.loader.load_from_file(path, cache='none')
except Exception as e:
raise AnsibleParserError(to_native(e))
# a plugin can be loaded via many different names with redirection- if so, we want to accept any of those names
valid_names = getattr(self, '_redirected_names') or [self.NAME]
if not config:
# no data
raise AnsibleParserError("%s is empty" % (to_native(path)))
elif config.get('plugin') not in valid_names:
# this is not my config file
raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
elif not isinstance(config, Mapping):
# configs are dictionaries
raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
self.set_options(direct=config, var_options=self._vars)
if 'cache' in self._options and self.get_option('cache'):
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
return config
def _consume_options(self, data):
""" update existing options from alternate configuration sources not normally used by Ansible.
        Many API libraries already have their own configuration sources; this allows a plugin author to leverage them.
:arg data: key/value pairs that correspond to configuration options for this plugin
"""
for k in self._options:
if k in data:
self._options[k] = data.pop(k)
def _expand_hostpattern(self, hostpattern):
"""
Takes a single host pattern and returns a list of hostnames and an
optional port number that applies to all of them.
"""
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except Exception:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
hostnames = expand_hostname_range(pattern)
else:
hostnames = [pattern]
return (hostnames, port)
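    # Illustrative behaviour (hedged; assumes parse_address accepts the input):
    #   self._expand_hostpattern('web[1:3].example.com:2222')
    #   -> (['web1.example.com', 'web2.example.com', 'web3.example.com'], 2222)
    #   self._expand_hostpattern('db.example.com')
    #   -> (['db.example.com'], None)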
class BaseFileInventoryPlugin(BaseInventoryPlugin):
""" Parses a File based Inventory Source"""
TYPE = 'storage'
def __init__(self):
super(BaseFileInventoryPlugin, self).__init__()
class Cacheable(object):
_cache = CacheObject()
@property
def cache(self):
return self._cache
def load_cache_plugin(self):
plugin_name = self.get_option('cache_plugin')
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(plugin_name, **cache_options)
def get_cache_key(self, path):
return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))
def _get_cache_prefix(self, path):
""" create predictable unique prefix for plugin/inventory """
m = hashlib.sha1()
m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
d1 = m.hexdigest()
n = hashlib.sha1()
n.update(to_bytes(path, errors='surrogate_or_strict'))
d2 = n.hexdigest()
return 's_'.join([d1[:5], d2[:5]])
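    # Illustrative shape of the result (hedged): the prefix is always two
    # 5-character sha1 hexdigest prefixes joined by 's_' (e.g. 'xxxxxs_yyyyy'),
    # so it is stable for a given plugin NAME / inventory path pair without
    # embedding either value directly.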
def clear_cache(self):
self._cache.flush()
def update_cache_if_changed(self):
self._cache.update_cache_if_changed()
def set_cache_plugin(self):
self._cache.set_cache()
class Constructable(object):
def _compose(self, template, variables, disable_lookups=True):
""" helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars"""
t = self.templar
try:
use_extra = self.get_option('use_extra_vars')
except Exception:
use_extra = False
if use_extra:
t.available_variables = combine_vars(variables, self._vars)
else:
t.available_variables = variables
return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string),
disable_lookups=disable_lookups)
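    # Illustrative effect (a hedged sketch): with the default Jinja2 delimiters,
    #   self._compose('var1 + var2', {'var1': 1, 'var2': 2})
    # renders the expression '{{ var1 + var2 }}' against the given variables and
    # returns 3 (subject to the templar's type handling).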
def _set_composite_vars(self, compose, variables, host, strict=False):
""" loops over compose entries to create vars for hosts """
if compose and isinstance(compose, dict):
for varname in compose:
try:
composite = self._compose(compose[varname], variables)
except Exception as e:
if strict:
raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
continue
self.inventory.set_variable(host, varname, composite)
def _add_host_to_composed_groups(self, groups, variables, host, strict=False, fetch_hostvars=True):
""" helper to create complex groups for plugins based on jinja2 conditionals, hosts that meet the conditional are added to group"""
# process each 'group entry'
if groups and isinstance(groups, dict):
if fetch_hostvars:
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
self.templar.available_variables = variables
for group_name in groups:
conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
group_name = self._sanitize_group_name(group_name)
try:
result = boolean(self.templar.template(conditional))
except Exception as e:
if strict:
raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
continue
if result:
# ensure group exists, use sanitized name
group_name = self.inventory.add_group(group_name)
# add host to group
self.inventory.add_child(group_name, host)
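    # Illustrative usage (hedged): with
    #   groups = {'web': "inventory_hostname.startswith('web')"}
    # a host named 'web01' is added to a (sanitized) group called 'web', while
    # hosts failing the conditional are left untouched.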
def _add_host_to_keyed_groups(self, keys, variables, host, strict=False, fetch_hostvars=True):
""" helper to create groups for plugins based on variable values and add the corresponding hosts to it"""
if keys and isinstance(keys, list):
for keyed in keys:
if keyed and isinstance(keyed, dict):
if fetch_hostvars:
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
try:
key = self._compose(keyed.get('key'), variables)
except Exception as e:
if strict:
raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
continue
default_value_name = keyed.get('default_value', None)
trailing_separator = keyed.get('trailing_separator')
if trailing_separator is not None and default_value_name is not None:
raise AnsibleParserError("parameters are mutually exclusive for keyed groups: default_value|trailing_separator")
if key or (key == '' and default_value_name is not None):
prefix = keyed.get('prefix', '')
sep = keyed.get('separator', '_')
raw_parent_name = keyed.get('parent_group', None)
if raw_parent_name:
try:
raw_parent_name = self.templar.template(raw_parent_name)
except AnsibleError as e:
if strict:
raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
continue
new_raw_group_names = []
if isinstance(key, string_types):
# if key is empty, 'default_value' will be used as group name
if key == '' and default_value_name is not None:
new_raw_group_names.append(default_value_name)
else:
new_raw_group_names.append(key)
elif isinstance(key, list):
for name in key:
# if list item is empty, 'default_value' will be used as group name
if name == '' and default_value_name is not None:
new_raw_group_names.append(default_value_name)
else:
new_raw_group_names.append(name)
elif isinstance(key, Mapping):
for (gname, gval) in key.items():
bare_name = '%s%s%s' % (gname, sep, gval)
if gval == '':
# key's value is empty
if default_value_name is not None:
bare_name = '%s%s%s' % (gname, sep, default_value_name)
elif trailing_separator is False:
bare_name = gname
new_raw_group_names.append(bare_name)
else:
raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
for bare_name in new_raw_group_names:
if prefix == '' and self.get_option('leading_separator') is False:
sep = ''
gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
result_gname = self.inventory.add_group(gname)
self.inventory.add_host(host, result_gname)
if raw_parent_name:
parent_name = self._sanitize_group_name(raw_parent_name)
self.inventory.add_group(parent_name)
self.inventory.add_child(parent_name, result_gname)
else:
# exclude case of empty list and dictionary, because these are valid constructions
# simply no groups need to be constructed, but are still falsy
if strict and key not in ([], {}):
raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
else:
raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
| 20,608 | Python | .py | 377 | 41.498674 | 154 | 0.595275 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,356 | constructed.py | ansible_ansible/lib/ansible/plugins/inventory/constructed.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: constructed
version_added: "2.4"
short_description: Uses Jinja2 to construct vars and groups based on existing inventory.
description:
    - Uses a YAML configuration file with a valid YAML or C(.config) extension to define var expressions and group conditionals.
    - The Jinja2 conditionals determine which hosts qualify for membership in a group.
    - The Jinja2 expressions are evaluated and assigned to the variables.
- Only variables already available from previous inventories or the fact cache can be used for templating.
- When O(strict) is False, failed expressions will be ignored (assumes vars were missing).
options:
plugin:
description: token that ensures this is a source file for the 'constructed' plugin.
required: True
choices: ['ansible.builtin.constructed', 'constructed']
use_vars_plugins:
description:
        - Normally, for performance reasons, vars plugins get executed after the inventory sources complete the base inventory;
          this option allows for getting vars related to hosts/groups from those plugins.
- The host_group_vars (enabled by default) 'vars plugin' is the one responsible for reading host_vars/ and group_vars/ directories.
- This will execute all vars plugins, even those that are not supposed to execute at the 'inventory' stage.
See vars plugins docs for details on 'stage'.
- Implicit groups, such as 'all' or 'ungrouped', need to be explicitly defined in any previous inventory to apply the
corresponding group_vars
required: false
default: false
type: boolean
version_added: '2.11'
extends_documentation_fragment:
- constructed
"""
EXAMPLES = r"""
# inventory.config file in YAML format
plugin: ansible.builtin.constructed
strict: False
compose:
var_sum: var1 + var2
# this variable will only be set if I have a persistent fact cache enabled (and have non expired facts)
# `strict: False` will skip this instead of producing an error if it is missing facts.
server_type: "ansible_hostname | regex_replace ('(.{6})(.{2}).*', '\\2')"
groups:
# simple name matching
webservers: inventory_hostname.startswith('web')
# using ec2 'tags' (assumes aws inventory)
development: "'devel' in (ec2_tags|list)"
# using other host properties populated in inventory
private_only: not (public_dns_name is defined or ip_address is defined)
# complex group membership
multi_group: (group_names | intersect(['alpha', 'beta', 'omega'])) | length >= 2
keyed_groups:
# this creates a group per distro (distro_CentOS, distro_Debian) and assigns the hosts that have matching values to it,
# using the default separator "_"
- prefix: distro
key: ansible_distribution
# the following examples assume the first inventory is from the `aws_ec2` plugin
# this creates a group per ec2 architecture and assign hosts to the matching ones (arch_x86_64, arch_sparc, etc)
- prefix: arch
key: architecture
# this creates a group per ec2 region like "us_west_1"
- prefix: ""
separator: ""
key: placement.region
# this creates a common parent group for all ec2 availability zones
- key: placement.availability_zone
parent_group: all_ec2_zones
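  # illustrative addition (hedged): a dictionary-valued key, such as an ec2
  # 'tags' mapping assumed to exist in hostvars, creates one group per
  # key/value pair, e.g. "tag_Name_webserver"
  - prefix: tag
    key: tags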
"""
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError, AnsibleOptionsError
from ansible.inventory.helpers import get_group_vars
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils.common.text.converters import to_native
from ansible.utils.vars import combine_vars
from ansible.vars.fact_cache import FactCache
from ansible.vars.plugins import get_vars_from_inventory_sources
class InventoryModule(BaseInventoryPlugin, Constructable):
""" constructs groups and vars using Jinja2 template expressions """
NAME = 'constructed'
def __init__(self):
super(InventoryModule, self).__init__()
self._cache = FactCache()
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
valid = True
return valid
def get_all_host_vars(self, host, loader, sources):
""" requires host object """
return combine_vars(self.host_groupvars(host, loader, sources), self.host_vars(host, loader, sources))
def host_groupvars(self, host, loader, sources):
""" requires host object """
gvars = get_group_vars(host.get_groups())
if self.get_option('use_vars_plugins'):
gvars = combine_vars(gvars, get_vars_from_inventory_sources(loader, sources, host.get_groups(), 'all'))
return gvars
def host_vars(self, host, loader, sources):
""" requires host object """
hvars = host.get_vars()
if self.get_option('use_vars_plugins'):
hvars = combine_vars(hvars, get_vars_from_inventory_sources(loader, sources, [host], 'all'))
return hvars
def parse(self, inventory, loader, path, cache=False):
""" parses the inventory file """
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
self._read_config_data(path)
sources = []
try:
sources = inventory.processed_sources
except AttributeError:
if self.get_option('use_vars_plugins'):
raise AnsibleOptionsError("The option use_vars_plugins requires ansible >= 2.11.")
strict = self.get_option('strict')
fact_cache = FactCache()
try:
# Go over hosts (less var copies)
for host in inventory.hosts:
# get available variables to templar
hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
if host in fact_cache: # adds facts if cache is active
hostvars = combine_vars(hostvars, fact_cache[host])
# create composite vars
self._set_composite_vars(self.get_option('compose'), hostvars, host, strict=strict)
# refetch host vars in case new ones have been created above
hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
if host in self._cache: # adds facts if cache is active
hostvars = combine_vars(hostvars, self._cache[host])
# constructed groups based on conditionals
self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict, fetch_hostvars=False)
# constructed groups based variable values
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=False)
except Exception as e:
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)), orig_exc=e)
| 7,551 | Python | .py | 137 | 45.394161 | 147 | 0.661739 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,357 | yaml.py | ansible_ansible/lib/ansible/plugins/inventory/yaml.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: yaml
version_added: "2.4"
short_description: Uses a specific YAML file as an inventory source.
description:
- "YAML-based inventory, should start with the C(all) group and contain hosts/vars/children entries."
- Host entries can have sub-entries defined, which will be treated as variables.
- Vars entries are normal group vars.
- "Children are 'child groups', which can also have their own vars/hosts/children and so on."
- File MUST have a valid extension, defined in configuration.
notes:
- If you want to set vars for the C(all) group inside the inventory file, the C(all) group must be the first entry in the file.
- Enabled in configuration by default.
options:
yaml_extensions:
description: list of 'valid' extensions for files containing YAML
type: list
elements: string
default: ['.yaml', '.yml', '.json']
env:
- name: ANSIBLE_YAML_FILENAME_EXT
- name: ANSIBLE_INVENTORY_PLUGIN_EXTS
ini:
- key: yaml_valid_extensions
section: defaults
- section: inventory_plugin_yaml
key: yaml_valid_extensions
"""
EXAMPLES = """
all: # keys must be unique, i.e. only one 'hosts' per group
hosts:
test1:
test2:
host_var: value
vars:
group_all_var: value
children: # key order does not matter, indentation does
other_group:
children:
group_x:
hosts:
test5 # Note that one machine will work without a colon
#group_x:
# hosts:
# test5 # But this won't
# test7 #
group_y:
hosts:
test6: # So always use a colon
vars:
g2_var2: value3
hosts:
test4:
ansible_host: 127.0.0.1
last_group:
hosts:
test1 # same host as above, additional group membership
vars:
group_last_var: value
"""
import os
from collections.abc import MutableMapping
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.plugins.inventory import BaseFileInventoryPlugin
NoneType = type(None)
class InventoryModule(BaseFileInventoryPlugin):
NAME = 'yaml'
def __init__(self):
super(InventoryModule, self).__init__()
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in self.get_option('yaml_extensions'):
valid = True
return valid
def parse(self, inventory, loader, path, cache=True):
""" parses the inventory file """
super(InventoryModule, self).parse(inventory, loader, path)
self.set_options()
try:
data = self.loader.load_from_file(path, cache='none')
except Exception as e:
raise AnsibleParserError(e)
if not data:
raise AnsibleParserError('Parsed empty YAML file')
elif not isinstance(data, MutableMapping):
raise AnsibleParserError('YAML inventory has invalid structure, it should be a dictionary, got: %s' % type(data))
elif data.get('plugin'):
raise AnsibleParserError('Plugin configuration YAML file, not YAML inventory')
# We expect top level keys to correspond to groups, iterate over them
# to get host, vars and subgroups (which we iterate over recursively)
if isinstance(data, MutableMapping):
for group_name in data:
self._parse_group(group_name, data[group_name])
else:
raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(data))
def _parse_group(self, group, group_data):
if isinstance(group_data, (MutableMapping, NoneType)): # type: ignore[misc]
try:
group = self.inventory.add_group(group)
except AnsibleError as e:
raise AnsibleParserError("Unable to add group %s: %s" % (group, to_text(e)))
if group_data is not None:
# make sure they are dicts
for section in ['vars', 'children', 'hosts']:
if section in group_data:
# convert strings to dicts as these are allowed
if isinstance(group_data[section], string_types):
group_data[section] = {group_data[section]: None}
if not isinstance(group_data[section], (MutableMapping, NoneType)): # type: ignore[misc]
raise AnsibleParserError('Invalid "%s" entry for "%s" group, requires a dictionary, found "%s" instead.' %
(section, group, type(group_data[section])))
for key in group_data:
if not isinstance(group_data[key], (MutableMapping, NoneType)): # type: ignore[misc]
self.display.warning('Skipping key (%s) in group (%s) as it is not a mapping, it is a %s' % (key, group, type(group_data[key])))
continue
if isinstance(group_data[key], NoneType): # type: ignore[misc]
self.display.vvv('Skipping empty key (%s) in group (%s)' % (key, group))
elif key == 'vars':
for var in group_data[key]:
self.inventory.set_variable(group, var, group_data[key][var])
elif key == 'children':
for subgroup in group_data[key]:
subgroup = self._parse_group(subgroup, group_data[key][subgroup])
self.inventory.add_child(group, subgroup)
elif key == 'hosts':
for host_pattern in group_data[key]:
hosts, port = self._parse_host(host_pattern)
self._populate_host_vars(hosts, group_data[key][host_pattern] or {}, group, port)
else:
self.display.warning('Skipping unexpected key (%s) in group (%s), only "vars", "children" and "hosts" are valid' % (key, group))
else:
self.display.warning("Skipping '%s' as this is not a valid group definition" % group)
return group
def _parse_host(self, host_pattern):
"""
Each host key can be a pattern, try to process it and add variables as needed
"""
try:
(hostnames, port) = self._expand_hostpattern(host_pattern)
except TypeError:
raise AnsibleParserError(
f"Host pattern {host_pattern} must be a string. Enclose integers/floats in quotation marks."
)
return hostnames, port
| 7,415 | Python | .py | 151 | 36.225166 | 152 | 0.579428 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,358 | script.py | ansible_ansible/lib/ansible/plugins/inventory/script.py |
# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = '''
name: script
version_added: "2.4"
short_description: Executes an inventory script that returns JSON
options:
always_show_stderr:
description: Toggle display of stderr even when script was successful
version_added: "2.5.1"
default: True
type: boolean
ini:
- section: inventory_plugin_script
key: always_show_stderr
env:
- name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_STDERR
description:
- The source provided must be an executable that returns Ansible inventory JSON
- The source must accept C(--list) and C(--host <hostname>) as arguments.
C(--host) will only be used if no C(_meta) key is present.
This is a performance optimization as the script would be called one additional time per host otherwise.
notes:
- Enabled in configuration by default.
- The plugin does not cache results because external inventory scripts are responsible for their own caching.
    - To write your own inventory script see R(Developing dynamic inventory,developing_inventory) from the documentation site.
- To find the scripts that used to be part of the code release, go to U(https://github.com/ansible-community/contrib-scripts/).
- Since 2.19 using a directory as an inventory source will no longer ignore .ini files by default,
but you can still update the configuration to do so.
'''
EXAMPLES = r'''# fmt: code
### simple bash script
#!/usr/bin/env bash
if [ "$1" == "--list" ]; then
cat<<EOF
{
"bash_hosts": {
"hosts": [
"myhost.domain.com",
"myhost2.domain.com"
],
"vars": {
"host_test": "test-value"
}
},
"_meta": {
"hostvars": {
"myhost.domain.com": {
"host_specific_test_var": "test-value"
}
}
}
}
EOF
elif [ "$1" == "--host" ]; then
# this should not normally be called by Ansible as we return _meta above
if [ "$2" == "myhost.domain.com" ]; then
echo '{"_meta": {hostvars": {"myhost.domain.com": {"host_specific-test_var": "test-value"}}}}'
else
echo '{"_meta": {hostvars": {}}}'
fi
else
echo "Invalid option: use --list or --host <hostname>"
exit 1
fi
### python example with ini config
#!/usr/bin/env python
"""
# ansible_inventory.py
"""
import argparse
import json
import os.path
import sys
from configparser import ConfigParser
from inventories.custom import MyInventoryAPI
def load_config() -> ConfigParser:
cp = ConfigParser()
config_file = os.path.expanduser("~/.config/ansible_inventory_script.cfg")
cp.read(config_file)
if not cp.has_option('DEFAULT', 'namespace'):
raise ValueError("Missing configuration option: DEFAULT -> namespace")
return cp
def get_api_data(namespace: str, pretty=False) -> str:
    """
    :param namespace: parameter for our custom api
    :param pretty: Human readable JSON vs machine readable
    :return: JSON string
    """
    # assumes the (illustrative) API returns a mapping with 'groups' and 'hosts' keys
    found_data = MyInventoryAPI(namespace)
    data = {'_meta': {'hostvars': {}}}
    for group, group_info in found_data['groups'].items():
        data[group] = {
            'hosts': group_info.get('host_list', []),
            'vars': group_info.get('info', {}),
            'children': group_info.get('subgroups', []),
        }
    for name, host_info in found_data['hosts'].items():
        # turn info into hostvars
        hostvars = host_info.get('info', {})
        # set ansible_host if possible
        if 'address' in host_info:
            hostvars['ansible_host'] = host_info['address']
        data['_meta']['hostvars'][name] = hostvars
    return json.dumps(data, indent=4 if pretty else None)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser( description=__doc__, prog=__file__)
arg_parser.add_argument('--pretty', action='store_true', default=False, help="Pretty JSON")
mandatory_options = arg_parser.add_mutually_exclusive_group()
mandatory_options.add_argument('--list', action='store', nargs="*", help="Get inventory JSON from our API")
mandatory_options.add_argument('--host', action='store',
help="Get variables for specific host, not used but kept for compatibility")
try:
config = load_config()
namespace = config.get('DEFAULT', 'namespace')
args = arg_parser.parse_args()
if args.host:
print('{"_meta":{}}')
sys.stderr.write('This script already provides _meta via --list, so this option is really ignored')
        elif args.list is not None:
print(get_api_data(namespace, args.pretty))
else:
raise ValueError("Valid options are --list or --host <HOSTNAME>")
except ValueError:
raise
'''
import os
import subprocess
from collections.abc import Mapping
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.basic import json_dict_bytes_to_unicode
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.utils.display import Display
display = Display()
class InventoryModule(BaseInventoryPlugin):
""" Host inventory parser for ansible using external inventory scripts. """
NAME = 'script'
def __init__(self):
super(InventoryModule, self).__init__()
self._hosts = set()
def verify_file(self, path):
""" Verify if file is usable by this plugin, base does minimal accessibility check """
valid = super(InventoryModule, self).verify_file(path)
if valid:
# not only accessible, file must be executable and/or have shebang
shebang_present = False
try:
with open(path, 'rb') as inv_file:
initial_chars = inv_file.read(2)
if initial_chars.startswith(b'#!'):
shebang_present = True
except Exception:
pass
if not os.access(path, os.X_OK) and not shebang_present:
valid = False
return valid
def parse(self, inventory, loader, path, cache=None):
super(InventoryModule, self).parse(inventory, loader, path)
self.set_options()
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
# directory when '.' is not in PATH.
cmd = [path, "--list"]
try:
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleParserError("problem running %s (%s)" % (' '.join(cmd), to_native(e)))
(stdout, stderr) = sp.communicate()
path = to_native(path)
err = to_native(stderr or "")
if err and not err.endswith('\n'):
err += '\n'
if sp.returncode != 0:
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (path, err))
# make sure script output is unicode so that json loader will output unicode strings itself
try:
data = to_text(stdout, errors="strict")
except Exception as e:
raise AnsibleError("Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}".format(path, to_native(e)))
try:
processed = self.loader.load(data, json_only=True)
except Exception as e:
raise AnsibleError("failed to parse executable inventory script results from {0}: {1}\n{2}".format(path, to_native(e), err))
# if no other errors happened and you want to force displaying stderr, do so now
if stderr and self.get_option('always_show_stderr'):
self.display.error(msg=to_text(err))
if not isinstance(processed, Mapping):
raise AnsibleError("failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}".format(path, err))
group = None
data_from_meta = None
# A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
# if this "hostvars" exists at all then do not call --host for each # host.
# This is for efficiency and scripts should still return data
# if called with --host for backwards compat with 1.2 and earlier.
for (group, gdata) in processed.items():
if group == '_meta':
if 'hostvars' in gdata:
data_from_meta = gdata['hostvars']
else:
self._parse_group(group, gdata)
for host in self._hosts:
got = {}
if data_from_meta is None:
got = self.get_host_variables(path, host)
else:
try:
got = data_from_meta.get(host, {})
except AttributeError as e:
raise AnsibleError("Improperly formatted host information for %s: %s" % (host, to_native(e)), orig_exc=e)
self._populate_host_vars([host], got)
except Exception as e:
raise AnsibleParserError(to_native(e))
def _parse_group(self, group, data):
group = self.inventory.add_group(group)
if not isinstance(data, dict):
data = {'hosts': data}
        # if it is not one of those subkeys, then it is the simplified syntax: a host with vars
elif not any(k in data for k in ('hosts', 'vars', 'children')):
data = {'hosts': [group], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
raise AnsibleError("You defined a group '%s' with bad data for the host list:\n %s" % (group, data))
for hostname in data['hosts']:
self._hosts.add(hostname)
self.inventory.add_host(hostname, group)
if 'vars' in data:
if not isinstance(data['vars'], dict):
raise AnsibleError("You defined a group '%s' with bad data for variables:\n %s" % (group, data))
for k, v in data['vars'].items():
self.inventory.set_variable(group, k, v)
if group != '_meta' and isinstance(data, dict) and 'children' in data:
for child_name in data['children']:
child_name = self.inventory.add_group(child_name)
self.inventory.add_child(group, child_name)
def get_host_variables(self, path, host):
""" Runs <script> --host <hostname>, to determine additional host variables """
cmd = [path, "--host", host]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, stderr) = sp.communicate()
if sp.returncode != 0:
raise AnsibleError("Inventory script (%s) had an execution error: %s" % (path, to_native(stderr)))
if out.strip() == '':
return {}
try:
return json_dict_bytes_to_unicode(self.loader.load(out, file_name=path))
except ValueError:
raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
| 12,326 | Python | .py | 258 | 37.077519 | 146 | 0.594935 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,359 | host_list.py | ansible_ansible/lib/ansible/plugins/inventory/host_list.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
name: host_list
version_added: "2.4"
short_description: Parses a 'host list' string
description:
- Parses a host list string as a comma separated values of hosts
- This plugin only applies to inventory strings that are not paths and contain a comma.
"""
EXAMPLES = r"""
# define 2 hosts in command line
# ansible -i '10.10.2.6, 10.10.2.4' -m ping all
# DNS resolvable names
# ansible -i 'host1.example.com, host2' -m user -a 'name=me state=absent' all
# just use localhost
# ansible-playbook -i 'localhost,' play.yml -c local
"""
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
NAME = 'host_list'
def verify_file(self, host_list):
valid = False
b_path = to_bytes(host_list, errors='surrogate_or_strict')
if not os.path.exists(b_path) and ',' in host_list:
valid = True
return valid
def parse(self, inventory, loader, host_list, cache=True):
""" parses the inventory file """
super(InventoryModule, self).parse(inventory, loader, host_list)
try:
for h in host_list.split(','):
h = h.strip()
if h:
try:
(host, port) = parse_address(h, allow_ranges=False)
except AnsibleError as e:
self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
host = h
port = None
if host not in self.inventory.hosts:
self.inventory.add_host(host, group='ungrouped', port=port)
except Exception as e:
raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
| 2,252 | Python | .py | 49 | 36.877551 | 117 | 0.635574 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,360 | toml.py | ansible_ansible/lib/ansible/plugins/inventory/toml.py |
# Copyright (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = r"""
name: toml
version_added: "2.8"
short_description: Uses a specific TOML file as an inventory source.
description:
- TOML based inventory format
- File MUST have a valid '.toml' file extension
notes:
- >
Requires one of the following python libraries: 'toml', 'tomli', or 'tomllib'
"""
EXAMPLES = r"""# fmt: toml
# Example 1
[all.vars]
has_java = false
[web]
children = [
"apache",
"nginx"
]
vars = { http_port = 8080, myvar = 23 }
[web.hosts]
host1 = {}
host2 = { ansible_port = 222 }
[apache.hosts]
tomcat1 = {}
tomcat2 = { myvar = 34 }
tomcat3 = { mysecret = "03#pa33w0rd" }
[nginx.hosts]
jenkins1 = {}
[nginx.vars]
has_java = true
# Example 2
[all.vars]
has_java = false
[web]
children = [
"apache",
"nginx"
]
[web.vars]
http_port = 8080
myvar = 23
[web.hosts.host1]
[web.hosts.host2]
ansible_port = 222
[apache.hosts.tomcat1]
[apache.hosts.tomcat2]
myvar = 34
[apache.hosts.tomcat3]
mysecret = "03#pa33w0rd"
[nginx.hosts.jenkins1]
[nginx.vars]
has_java = true
# Example 3
[ungrouped.hosts]
host1 = {}
host2 = { ansible_host = "127.0.0.1", ansible_port = 44 }
host3 = { ansible_host = "127.0.0.1", ansible_port = 45 }
[g1.hosts]
host4 = {}
[g2.hosts]
host4 = {}
"""
import os
import typing as t
from collections.abc import MutableMapping, MutableSequence
from functools import partial
from ansible.errors import AnsibleFileNotFound, AnsibleParserError, AnsibleRuntimeError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import string_types, text_type
from ansible.parsing.yaml.objects import AnsibleSequence, AnsibleUnicode
from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText
HAS_TOML = False
try:
import toml
HAS_TOML = True
except ImportError:
pass
HAS_TOMLIW = False
try:
import tomli_w # type: ignore[import]
HAS_TOMLIW = True
except ImportError:
pass
HAS_TOMLLIB = False
try:
import tomllib # type: ignore[import]
HAS_TOMLLIB = True
except ImportError:
try:
import tomli as tomllib # type: ignore[no-redef]
HAS_TOMLLIB = True
except ImportError:
pass
display = Display()
# dumps
if HAS_TOML and hasattr(toml, 'TomlEncoder'):
# toml>=0.10.0
class AnsibleTomlEncoder(toml.TomlEncoder):
def __init__(self, *args, **kwargs):
super(AnsibleTomlEncoder, self).__init__(*args, **kwargs)
# Map our custom YAML object types to dump_funcs from ``toml``
self.dump_funcs.update({
AnsibleSequence: self.dump_funcs.get(list),
AnsibleUnicode: self.dump_funcs.get(str),
AnsibleUnsafeBytes: self.dump_funcs.get(str),
AnsibleUnsafeText: self.dump_funcs.get(str),
})
toml_dumps = partial(toml.dumps, encoder=AnsibleTomlEncoder()) # type: t.Callable[[t.Any], str]
else:
# toml<0.10.0
# tomli-w
def toml_dumps(data): # type: (t.Any) -> str
if HAS_TOML:
return toml.dumps(convert_yaml_objects_to_native(data))
elif HAS_TOMLIW:
return tomli_w.dumps(convert_yaml_objects_to_native(data))
raise AnsibleRuntimeError(
'The python "toml" or "tomli-w" library is required when using the TOML output format'
)
# loads
if HAS_TOML:
# prefer toml if installed, since it supports both encoding and decoding
toml_loads = toml.loads # type: ignore[assignment]
TOMLDecodeError = toml.TomlDecodeError # type: t.Any
elif HAS_TOMLLIB:
toml_loads = tomllib.loads # type: ignore[assignment]
TOMLDecodeError = tomllib.TOMLDecodeError # type: t.Any # type: ignore[no-redef]
def convert_yaml_objects_to_native(obj):
"""Older versions of the ``toml`` python library, and tomllib, don't have
a pluggable way to tell the encoder about custom types, so we need to
ensure objects that we pass are native types.
Used with:
- ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing
- ``tomli`` or ``tomllib``
This function recurses an object and ensures we cast any of the types from
``ansible.parsing.yaml.objects`` into their native types, effectively cleansing
the data before we hand it over to the toml library.
This function doesn't directly check for the types from ``ansible.parsing.yaml.objects``
but instead checks for the types those objects inherit from, to offer more flexibility.
"""
if isinstance(obj, dict):
return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items())
elif isinstance(obj, list):
return [convert_yaml_objects_to_native(v) for v in obj]
elif isinstance(obj, text_type):
return text_type(obj)
else:
return obj
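# Illustrative effect (hedged sketch; relies on the YAML object types
# subclassing dict/list/str as described in the docstring above):
#   convert_yaml_objects_to_native({'k': AnsibleUnicode('v')}) -> {'k': 'v'}
#   convert_yaml_objects_to_native(AnsibleSequence(['a'])) -> ['a']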
class InventoryModule(BaseFileInventoryPlugin):
NAME = 'toml'
def _parse_group(self, group, group_data):
if group_data is not None and not isinstance(group_data, MutableMapping):
self.display.warning("Skipping '%s' as this is not a valid group definition" % group)
return
group = self.inventory.add_group(group)
if group_data is None:
return
for key, data in group_data.items():
if key == 'vars':
if not isinstance(data, MutableMapping):
raise AnsibleParserError(
'Invalid "vars" entry for "%s" group, requires a dict, found "%s" instead.' %
(group, type(data))
)
for var, value in data.items():
self.inventory.set_variable(group, var, value)
elif key == 'children':
if not isinstance(data, MutableSequence):
raise AnsibleParserError(
'Invalid "children" entry for "%s" group, requires a list, found "%s" instead.' %
(group, type(data))
)
for subgroup in data:
self._parse_group(subgroup, {})
self.inventory.add_child(group, subgroup)
elif key == 'hosts':
if not isinstance(data, MutableMapping):
raise AnsibleParserError(
'Invalid "hosts" entry for "%s" group, requires a dict, found "%s" instead.' %
(group, type(data))
)
for host_pattern, value in data.items():
hosts, port = self._expand_hostpattern(host_pattern)
self._populate_host_vars(hosts, value, group, port)
else:
self.display.warning(
'Skipping unexpected key "%s" in group "%s", only "vars", "children" and "hosts" are valid' %
(key, group)
)
def _load_file(self, file_name):
if not file_name or not isinstance(file_name, string_types):
raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
b_file_name = to_bytes(self.loader.path_dwim(file_name))
if not self.loader.path_exists(b_file_name):
raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
try:
(b_data, private) = self.loader._get_file_contents(file_name)
return toml_loads(to_text(b_data, errors='surrogate_or_strict'))
except TOMLDecodeError as e:
raise AnsibleParserError(
'TOML file (%s) is invalid: %s' % (file_name, to_native(e)),
orig_exc=e
)
except (IOError, OSError) as e:
raise AnsibleParserError(
"An error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)),
orig_exc=e
)
except Exception as e:
raise AnsibleParserError(
"An unexpected error occurred while parsing the file '%s': %s" % (file_name, to_native(e)),
orig_exc=e
)
def parse(self, inventory, loader, path, cache=True):
""" parses the inventory file """
if not HAS_TOMLLIB and not HAS_TOML:
# tomllib works here too, but we don't call it out in the error,
# since you either have it or not as part of cpython stdlib >= 3.11
raise AnsibleParserError(
'The TOML inventory plugin requires the python "toml", or "tomli" library'
)
super(InventoryModule, self).parse(inventory, loader, path)
self.set_options()
try:
data = self._load_file(path)
except Exception as e:
raise AnsibleParserError(e)
if not data:
raise AnsibleParserError('Parsed empty TOML file')
elif data.get('plugin'):
raise AnsibleParserError('Plugin configuration TOML file, not TOML inventory')
for group_name in data:
self._parse_group(group_name, data[group_name])
def verify_file(self, path):
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if ext == '.toml':
return True
return False
| 9,587 | Python | .py | 246 | 31.02439 | 113 | 0.62831 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,361 | ini.py | ansible_ansible/lib/ansible/plugins/inventory/ini.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: ini
version_added: "2.4"
short_description: Uses an Ansible INI file as inventory source.
description:
- INI file based inventory, sections are groups or group related with special C(:modifiers).
- Entries in sections C([group_1]) are hosts, members of the group.
- Hosts can have variables defined inline as key/value pairs separated by C(=).
- The C(children) modifier indicates that the section contains groups.
- The C(vars) modifier indicates that the section contains variables assigned to members of the group.
- Anything found outside a section is considered an 'ungrouped' host.
- Values passed in the INI format using the C(key=value) syntax are interpreted differently depending on where they are declared within your inventory.
- When declared inline with the host, INI values are processed by Python's ast.literal_eval function
(U(https://docs.python.org/3/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
(strings, numbers, tuples, lists, dicts, booleans, None). If you want a number to be treated as a string, you must quote it.
Host lines accept multiple C(key=value) parameters per line.
Therefore they need a way to indicate that a space is part of a value rather than a separator.
- When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
- Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
- See the Examples for proper quoting to prevent changes to variable type.
notes:
- Enabled in configuration by default.
- Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
The YAML inventory plugin processes variable values consistently and correctly.
"""
EXAMPLES = """# fmt: ini
# Example 1
[web]
host1
host2 ansible_port=222 # defined inline, interpreted as an integer
[web:vars]
http_port=8080 # all members of 'web' will inherit these
myvar=23 # defined in a :vars section, interpreted as a string
[web:children] # child groups will automatically add their hosts to parent group
apache
nginx
[apache]
tomcat1
tomcat2 myvar=34 # host specific vars override group vars
tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
[nginx]
jenkins1
[nginx:vars]
has_java = True # vars in child groups override same in parent
[all:vars]
has_java = False # 'all' is 'top' parent
# Example 2
host1 # this is 'ungrouped'
# both hosts have same IP but diff ports, also 'ungrouped'
host2 ansible_host=127.0.0.1 ansible_port=44
host3 ansible_host=127.0.0.1 ansible_port=45
[g1]
host4
[g2]
host4 # same host as above, but member of 2 groups, will inherit vars from both
# inventory hostnames are unique
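# Example 3 - value typing (illustrative, following the rules described above)
[typed]
host5 my_int=42 my_list="['a', 'b']" my_str="'42'" # inline values are parsed as Python literals
[typed:vars]
vars_stay_strings=42 # declared in a :vars section, so this is the string '42'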
"""
import ast
import re
import warnings
from ansible.inventory.group import to_safe_group_name
from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.utils.shlex import shlex_split
class InventoryModule(BaseFileInventoryPlugin):
"""
Takes an INI-format inventory file and builds a list of groups and subgroups
with their associated hosts and variable settings.
"""
NAME = 'ini'
_COMMENT_MARKERS = frozenset((u';', u'#'))
b_COMMENT_MARKERS = frozenset((b';', b'#'))
def __init__(self):
super(InventoryModule, self).__init__()
self.patterns = {}
self._filename = None
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._filename = path
try:
# Read in the hosts, groups, and variables defined in the inventory file.
if self.loader:
(b_data, private) = self.loader._get_file_contents(path)
else:
b_path = to_bytes(path, errors='surrogate_or_strict')
with open(b_path, 'rb') as fh:
b_data = fh.read()
try:
# Faster to do to_text once on a long string than many
# times on smaller strings
data = to_text(b_data, errors='surrogate_or_strict').splitlines()
except UnicodeError:
# Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
data = []
for line in b_data.splitlines():
if line and line[0] in self.b_COMMENT_MARKERS:
# Replace is okay for comment lines
# data.append(to_text(line, errors='surrogate_then_replace'))
# Currently we only need these lines for accurate lineno in errors
data.append(u'')
else:
                        # Non-comment lines still have to be valid utf-8
data.append(to_text(line, errors='surrogate_or_strict'))
self._parse(path, data)
except Exception as e:
raise AnsibleParserError(e)
def _raise_error(self, message):
raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)
def _parse(self, path, lines):
"""
Populates self.groups from the given array of lines. Raises an error on
any parse failure.
"""
self._compile_patterns()
# We behave as though the first line of the inventory is '[ungrouped]',
# and begin to look for host definitions. We make a single pass through
# each line of the inventory, building up self.groups and adding hosts,
# subgroups, and setting variables as we go.
pending_declarations = {}
groupname = 'ungrouped'
state = 'hosts'
self.lineno = 0
for line in lines:
self.lineno += 1
line = line.strip()
# Skip empty lines and comments
if not line or line[0] in self._COMMENT_MARKERS:
continue
# Is this a [section] header? That tells us what group we're parsing
# definitions for, and what kind of definitions to expect.
m = self.patterns['section'].match(line)
if m:
(groupname, state) = m.groups()
groupname = to_safe_group_name(groupname)
state = state or 'hosts'
if state not in ['hosts', 'children', 'vars']:
title = ":".join(m.groups())
self._raise_error("Section [%s] has unknown type: %s" % (title, state))
# If we haven't seen this group before, we add a new Group.
if groupname not in self.inventory.groups:
# Either [groupname] or [groupname:children] is sufficient to declare a group,
                    # but [groupname:vars] is allowed only if the group is declared elsewhere.
# We add the group anyway, but make a note in pending_declarations to check at the end.
#
# It's possible that a group is previously pending due to being defined as a child
# group, in that case we simply pass so that the logic below to process pending
# declarations will take the appropriate action for a pending child group instead of
# incorrectly handling it as a var state pending declaration
if state == 'vars' and groupname not in pending_declarations:
pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)
self.inventory.add_group(groupname)
# When we see a declaration that we've been waiting for, we process and delete.
if groupname in pending_declarations and state != 'vars':
if pending_declarations[groupname]['state'] == 'children':
self._add_pending_children(groupname, pending_declarations)
elif pending_declarations[groupname]['state'] == 'vars':
del pending_declarations[groupname]
continue
elif line.startswith('[') and line.endswith(']'):
self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
"in the section entry, and that there are no other invalid characters")
# It's not a section, so the current state tells us what kind of
# definition it must be. The individual parsers will raise an
# error if we feed them something they can't digest.
# [groupname] contains host definitions that must be added to
# the current group.
if state == 'hosts':
hosts, port, variables = self._parse_host_definition(line)
self._populate_host_vars(hosts, variables, groupname, port)
# [groupname:vars] contains variable definitions that must be
# applied to the current group.
elif state == 'vars':
(k, v) = self._parse_variable_definition(line)
self.inventory.set_variable(groupname, k, v)
# [groupname:children] contains subgroup names that must be
# added as children of the current group. The subgroup names
# must themselves be declared as groups, but as before, they
# may only be declared later.
elif state == 'children':
child = self._parse_group_name(line)
if child not in self.inventory.groups:
if child not in pending_declarations:
pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
else:
pending_declarations[child]['parents'].append(groupname)
else:
self.inventory.add_child(groupname, child)
else:
# This can happen only if the state checker accepts a state that isn't handled above.
self._raise_error("Entered unhandled state: %s" % (state))
# Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
# We report only the first such error here.
for g in pending_declarations:
decl = pending_declarations[g]
if decl['state'] == 'vars':
raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
elif decl['state'] == 'children':
raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))
def _add_pending_children(self, group, pending):
for parent in pending[group]['parents']:
self.inventory.add_child(parent, group)
if parent in pending and pending[parent]['state'] == 'children':
self._add_pending_children(parent, pending)
del pending[group]
def _parse_group_name(self, line):
"""
Takes a single line and tries to parse it as a group name. Returns the
group name if successful, or raises an error.
"""
m = self.patterns['groupname'].match(line)
if m:
return m.group(1)
self._raise_error("Expected group name, got: %s" % (line))
def _parse_variable_definition(self, line):
"""
Takes a string and tries to parse it as a variable definition. Returns
the key and value if successful, or raises an error.
"""
# TODO: We parse variable assignments as a key (anything to the left of
# an '='"), an '=', and a value (anything left) and leave the value to
# _parse_value to sort out. We should be more systematic here about
# defining what is acceptable, how quotes work, and so on.
if '=' in line:
(k, v) = [e.strip() for e in line.split("=", 1)]
return (k, self._parse_value(v))
self._raise_error("Expected key=value, got: %s" % (line))
def _parse_host_definition(self, line):
"""
Takes a single line and tries to parse it as a host definition. Returns
a list of Hosts if successful, or raises an error.
"""
# A host definition comprises (1) a non-whitespace hostname or range,
# optionally followed by (2) a series of key="some value" assignments.
# We ignore any trailing whitespace and/or comments. For example, here
# are a series of host definitions in a group:
#
# [groupname]
# alpha
# beta:2345 user=admin # we'll tell shlex
# gamma sudo=True user=root # to ignore comments
try:
tokens = shlex_split(line, comments=True)
except ValueError as e:
self._raise_error("Error parsing host definition '%s': %s" % (line, e))
(hostnames, port) = self._expand_hostpattern(tokens[0])
# Try to process anything remaining as a series of key=value pairs.
variables = {}
for t in tokens[1:]:
if '=' not in t:
self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
(k, v) = t.split('=', 1)
variables[k] = self._parse_value(v)
return hostnames, port, variables
def _expand_hostpattern(self, hostpattern):
"""
        Do some extra checks on top of the base class hostpattern processing.
        """
        hostnames, port = super(InventoryModule, self)._expand_hostpattern(hostpattern)
if hostpattern.strip().endswith(':') and port is None:
raise AnsibleParserError("Invalid host pattern '%s' supplied, ending in ':' is not allowed, this character is reserved to provide a port." %
hostpattern)
for pattern in hostnames:
# some YAML parsing prevention checks
if pattern.strip() == '---':
raise AnsibleParserError("Invalid host pattern '%s' supplied, '---' is normally a sign this is a YAML file." % hostpattern)
return (hostnames, port)
@staticmethod
def _parse_value(v):
"""
Attempt to transform the string value from an ini file into a basic python object
(int, dict, list, unicode string, etc).
"""
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", SyntaxWarning)
v = ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return to_text(v, nonstring='passthru', errors='surrogate_or_strict')
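    # Illustrative behaviour (hedged sketch):
    #   _parse_value('42')          -> 42 (int)
    #   _parse_value("'42'")        -> '42' (str; the quoting preserved the type)
    #   _parse_value('[1, 2]')      -> [1, 2]
    #   _parse_value('plain words') -> the original text, as unicode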
def _compile_patterns(self):
"""
Compiles the regular expressions required to parse the inventory and
stores them in self.patterns.
"""
# Section names are square-bracketed expressions at the beginning of a
# line, comprising (1) a group name optionally followed by (2) a tag
# that specifies the contents of the section. We ignore any trailing
# whitespace and/or comments. For example:
#
# [groupname]
# [somegroup:vars]
# [naughty:children] # only get coal in their stockings
self.patterns['section'] = re.compile(
to_text(r"""^\[
([^:\]\s]+) # group name (see groupname below)
(?::(\w+))? # optional : and tag name
\]
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
""", errors='surrogate_or_strict'), re.X
)
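        # Illustrative matches (hedged): '[web]' -> ('web', None),
        # '[web:vars]  # trailing comment' -> ('web', 'vars'),
        # while '[web servers]' does not match (whitespace in the name).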
# FIXME: What are the real restrictions on group names, or rather, what
# should they be? At the moment, they must be non-empty sequences of non
# whitespace characters excluding ':' and ']', but we should define more
# precise rules in order to support better diagnostics.
self.patterns['groupname'] = re.compile(
to_text(r"""^
([^:\]\s]+)
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
""", errors='surrogate_or_strict'), re.X
)
| 17,553 | Python | .py | 326 | 42.717791 | 159 | 0.611202 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,362 | mathstuff.py | ansible_ansible/lib/ansible/plugins/test/mathstuff.py |
# (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import math
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
def isnotanumber(x):
try:
return math.isnan(x)
except TypeError:
return False
def contains(seq, value):
"""Opposite of the ``in`` test, allowing use as a test in filters like ``selectattr``
.. versionadded:: 2.8
"""
return value in seq
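# Illustrative Jinja2 usage of the tests below (assumed, not from the source):
#   {{ [2, 3] is subset([1, 2, 3]) }}  -> True
#   {{ users | selectattr('groups', 'contains', 'admin') | list }}
# keeps the users whose 'groups' sequence contains 'admin'.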
class TestModule:
""" Ansible math jinja2 tests """
def tests(self):
return {
# set theory
'subset': issubset,
'issubset': issubset,
'superset': issuperset,
'issuperset': issuperset,
'contains': contains,
# numbers
'nan': isnotanumber,
'isnan': isnotanumber,
}
| 1,508
|
Python
|
.py
| 46
| 27.543478
| 89
| 0.664824
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,363
|
__init__.py
|
ansible_ansible/lib/ansible/plugins/test/__init__.py
|
# (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.plugins import AnsibleJinja2Plugin
class AnsibleJinja2Test(AnsibleJinja2Plugin):
def _no_options(self, *args, **kwargs):
raise NotImplementedError("Jinaj2 test plugins do not support option functions, they use direct arguments instead.")
| 418
|
Python
|
.py
| 7
| 56.285714
| 124
| 0.780788
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,364
|
uri.py
|
ansible_ansible/lib/ansible/plugins/test/uri.py
|
# (c) Ansible Project
from __future__ import annotations
from urllib.parse import urlparse
def is_uri(value, schemes=None):
""" Will verify that the string passed is a valid 'URI', if given a list of valid schemes it will match those """
try:
x = urlparse(value)
isit = all([x.scheme is not None, x.path is not None, not schemes or x.scheme in schemes])
except Exception:
isit = False
return isit
def is_url(value, schemes=None):
""" Will verify that the string passed is a valid 'URL' """
isit = is_uri(value, schemes)
if isit:
try:
x = urlparse(value)
isit = bool(x.netloc or x.scheme == 'file')
except Exception:
isit = False
return isit
def is_urn(value):
return is_uri(value, ['urn'])
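# Illustrative results (assumed, not from the source):
#   is_uri('mailto:a@b.example')       -> True
#   is_url('mailto:a@b.example')       -> False (no netloc and scheme != 'file')
#   is_url('https://example.com/x')    -> True
#   is_urn('urn:isbn:0451450523')      -> True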
class TestModule(object):
""" Ansible URI jinja2 test """
def tests(self):
return {
# file testing
'uri': is_uri,
'url': is_url,
'urn': is_urn,
}
| 1,042
|
Python
|
.py
| 32
| 25.4375
| 117
| 0.596192
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,365
|
core.py
|
ansible_ansible/lib/ansible/plugins/test/core.py
|
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import re
import operator as py_operator
from collections.abc import MutableMapping, MutableSequence
from ansible.module_utils.compat.version import LooseVersion, StrictVersion
from ansible import errors
from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.vault import is_encrypted_file
from ansible.utils.display import Display
from ansible.utils.version import SemanticVersion
try:
from packaging.version import Version as PEP440Version
HAS_PACKAGING = True
except ImportError:
HAS_PACKAGING = False
display = Display()
def timedout(result):
""" Test if task result yields a time out"""
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'timedout' test expects a dictionary")
return result.get('timedout', False) and result['timedout'].get('period', False)
def failed(result):
""" Test if task result yields failed """
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'failed' test expects a dictionary")
return result.get('failed', False)
def success(result):
""" Test if task result yields success """
return not failed(result)
def unreachable(result):
""" Test if task result yields unreachable """
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'unreachable' test expects a dictionary")
return result.get('unreachable', False)
def reachable(result):
""" Test if task result yields reachable """
return not unreachable(result)
def changed(result):
""" Test if task result yields changed """
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'changed' test expects a dictionary")
if 'changed' not in result:
changed = False
if (
'results' in result and # some modules return a 'results' key
isinstance(result['results'], MutableSequence) and
isinstance(result['results'][0], MutableMapping)
):
for res in result['results']:
if res.get('changed', False):
changed = True
break
else:
changed = result.get('changed', False)
return changed
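# Illustrative (assumed): a loop result such as
#   {'results': [{'changed': False}, {'changed': True}]}
# tests as changed because at least one item reports a change.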
def skipped(result):
""" Test if task result yields skipped """
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'skipped' test expects a dictionary")
return result.get('skipped', False)
def started(result):
""" Test if async task has started """
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'started' test expects a dictionary")
if 'started' in result:
# For async tasks, return status
# NOTE: The value of started is 0 or 1, not False or True :-/
return result.get('started', 0) == 1
else:
# For non-async tasks, warn user, but return as if started
display.warning("The 'started' test expects an async task, but a non-async task was tested")
return True
def finished(result):
""" Test if async task has finished """
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'finished' test expects a dictionary")
if 'finished' in result:
# For async tasks, return status
# NOTE: The value of finished is 0 or 1, not False or True :-/
return result.get('finished', 0) == 1
else:
# For non-async tasks, warn user, but return as if finished
display.warning("The 'finished' test expects an async task, but a non-async task was tested")
return True
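# Illustrative async status dicts (assumed, not from the source):
#   {'started': 1, 'finished': 0} -> started is True, finished is False
#   {'started': 1, 'finished': 1} -> both tests return True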
def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
""" Expose `re` as a boolean filter using the `search` method by default.
This is likely only useful for `search` and `match` which already
have their own filters.
"""
# In addition to ensuring the correct type, to_text here will ensure
# _fail_with_undefined_error happens if the value is Undefined
value = to_text(value, errors='surrogate_or_strict')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
# fall back to the compiled pattern's search() when an unknown match_type is supplied
return bool(getattr(_re, match_type, _re.search)(value))
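# Illustrative (assumed): regex('foobar', pattern='^foo') -> True via re.search,
# while match('barfoo', pattern='foo') -> False since re.match anchors at the start.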
def vault_encrypted(value):
"""Evaluate whether a variable is a single vault encrypted value
.. versionadded:: 2.10
"""
return getattr(value, '__ENCRYPTED__', False) and value.is_encrypted()
def vaulted_file(value):
"""Evaluate whether a file is a vault
.. versionadded:: 2.18
"""
try:
with open(to_bytes(value), 'rb') as f:
return is_encrypted_file(f)
except (OSError, IOError) as e:
raise errors.AnsibleFilterError(f"Cannot test if the file {value} is a vault", orig_exc=e)
def match(value, pattern='', ignorecase=False, multiline=False):
""" Perform a `re.match` returning a boolean """
return regex(value, pattern, ignorecase, multiline, 'match')
def search(value, pattern='', ignorecase=False, multiline=False):
""" Perform a `re.search` returning a boolean """
return regex(value, pattern, ignorecase, multiline, 'search')
def version_compare(value, version, operator='eq', strict=None, version_type=None):
""" Perform a version comparison on a value """
op_map = {
'==': 'eq', '=': 'eq', 'eq': 'eq',
'<': 'lt', 'lt': 'lt',
'<=': 'le', 'le': 'le',
'>': 'gt', 'gt': 'gt',
'>=': 'ge', 'ge': 'ge',
'!=': 'ne', '<>': 'ne', 'ne': 'ne'
}
type_map = {
'loose': LooseVersion,
'strict': StrictVersion,
'semver': SemanticVersion,
'semantic': SemanticVersion,
'pep440': PEP440Version,
}
if strict is not None and version_type is not None:
raise errors.AnsibleFilterError("Cannot specify both 'strict' and 'version_type'")
if not value:
raise errors.AnsibleFilterError("Input version value cannot be empty")
if not version:
raise errors.AnsibleFilterError("Version parameter to compare against cannot be empty")
if version_type == 'pep440' and not HAS_PACKAGING:
raise errors.AnsibleFilterError("The pep440 version_type requires the Python 'packaging' library")
Version = LooseVersion
if strict:
Version = StrictVersion
elif version_type:
try:
Version = type_map[version_type]
except KeyError:
raise errors.AnsibleFilterError(
"Invalid version type (%s). Must be one of %s" % (version_type, ', '.join(map(repr, type_map)))
)
if operator in op_map:
operator = op_map[operator]
else:
raise errors.AnsibleFilterError(
'Invalid operator type (%s). Must be one of %s' % (operator, ', '.join(map(repr, op_map)))
)
try:
method = getattr(py_operator, operator)
return method(Version(to_text(value)), Version(to_text(version)))
except Exception as e:
raise errors.AnsibleFilterError('Version comparison failed: %s' % to_native(e))
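# Illustrative Jinja2 usage (assumed, not from the source):
#   {{ '2.10.1' is version('2.10', '>=') }}                             -> True
#   {{ '1.0.0-beta' is version('1.0.0', 'lt', version_type='semver') }} -> True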
def truthy(value, convert_bool=False):
"""Evaluate as value for truthiness using python ``bool``
Optionally, attempt to do a conversion to bool from boolean like values
such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
.. versionadded:: 2.10
"""
if convert_bool:
try:
value = boolean(value)
except TypeError:
pass
return bool(value)
def falsy(value, convert_bool=False):
"""Evaluate as value for falsiness using python ``bool``
Optionally, attempt to do a conversion to bool from boolean like values
such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
.. versionadded:: 2.10
"""
return not truthy(value, convert_bool=convert_bool)
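# Illustrative (assumed): truthy('no') -> True (non-empty string), but
# truthy('no', convert_bool=True) -> False after the boolean() conversion.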
class TestModule(object):
""" Ansible core jinja2 tests """
def tests(self):
return {
# failure testing
'failed': failed,
'failure': failed,
'succeeded': success,
'success': success,
'successful': success,
'reachable': reachable,
'unreachable': unreachable,
'timedout': timedout,
# changed testing
'changed': changed,
'change': changed,
# skip testing
'skipped': skipped,
'skip': skipped,
# async testing
'finished': finished,
'started': started,
# regex
'match': match,
'search': search,
'regex': regex,
# version comparison
'version_compare': version_compare,
'version': version_compare,
# lists
'any': any,
'all': all,
# truthiness
'truthy': truthy,
'falsy': falsy,
# vault
'vault_encrypted': vault_encrypted,
'vaulted_file': vaulted_file,
}
| 9,979
|
Python
|
.py
| 240
| 34.416667
| 111
| 0.646195
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,366
|
files.py
|
ansible_ansible/lib/ansible/plugins/test/files.py
|
# (c) 2015, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from os.path import isdir, isfile, isabs, exists, lexists, islink, samefile, ismount
class TestModule(object):
""" Ansible file jinja2 tests """
def tests(self):
return {
# file testing
'directory': isdir,
'is_dir': isdir,
'file': isfile,
'is_file': isfile,
'link': islink,
'is_link': islink,
'exists': exists,
'link_exists': lexists,
# path testing
'abs': isabs,
'is_abs': isabs,
'same_file': samefile,
'is_same_file': samefile,
'mount': ismount,
'is_mount': ismount,
}
| 1,407
|
Python
|
.py
| 39
| 29.384615
| 84
| 0.633627
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,367
|
host_group_vars.py
|
ansible_ansible/lib/ansible/plugins/vars/host_group_vars.py
|
# Copyright 2017 RedHat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import annotations
DOCUMENTATION = """
name: host_group_vars
version_added: "2.4"
short_description: In charge of loading group_vars and host_vars
requirements:
- Enabled in configuration
description:
- Loads YAML vars into corresponding groups/hosts in group_vars/ and host_vars/ directories.
- Files are restricted by extension to one of .yaml, .json, .yml or no extension.
- Hidden (starting with '.') and backup (ending with '~') files and directories are ignored.
- Only applies to inventory sources that are existing paths.
- Starting in 2.10, this plugin requires enabling and is enabled by default.
options:
stage:
ini:
- key: stage
section: vars_host_group_vars
env:
- name: ANSIBLE_VARS_PLUGIN_STAGE
_valid_extensions:
default: [".yml", ".yaml", ".json"]
description:
- "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
- 'This affects vars_files, include_vars, inventory and vars plugins among others.'
env:
- name: ANSIBLE_YAML_FILENAME_EXT
ini:
- key: yaml_valid_extensions
section: defaults
type: list
elements: string
extends_documentation_fragment:
- vars_plugin_staging
"""
import os
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.vars import BaseVarsPlugin
from ansible.utils.path import basedir
from ansible.inventory.group import InventoryObjectType
from ansible.utils.vars import combine_vars
CANONICAL_PATHS = {} # type: dict[str, str]
FOUND = {} # type: dict[str, list[str]]
NAK = set() # type: set[str]
PATH_CACHE = {} # type: dict[tuple[str, str], str]
class VarsModule(BaseVarsPlugin):
REQUIRES_ENABLED = True
is_stateless = True
def load_found_files(self, loader, data, found_files):
for found in found_files:
new_data = loader.load_from_file(found, cache='all', unsafe=True)
if new_data: # ignore empty files
data = combine_vars(data, new_data)
return data
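# Illustrative (assumed): for files yielding {'a': 1} and then {'a': 2, 'b': 3},
# combine_vars returns {'a': 2, 'b': 3} -- later files win with the default
# 'replace' hash behaviour.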
def get_vars(self, loader, path, entities, cache=True):
""" parses the inventory file """
if not isinstance(entities, list):
entities = [entities]
# realpath is expensive
try:
realpath_basedir = CANONICAL_PATHS[path]
except KeyError:
CANONICAL_PATHS[path] = realpath_basedir = os.path.realpath(basedir(path))
data = {}
for entity in entities:
try:
entity_name = entity.name
except AttributeError:
raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity)))
try:
first_char = entity_name[0]
except (TypeError, IndexError, KeyError):
raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity)))
# avoid 'chroot' type inventory hostnames /path/to/chroot
if first_char != os.path.sep:
try:
found_files = []
# load vars
try:
entity_type = entity.base_type
except AttributeError:
raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity)))
if entity_type is InventoryObjectType.HOST:
subdir = 'host_vars'
elif entity_type is InventoryObjectType.GROUP:
subdir = 'group_vars'
else:
raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity)))
if cache:
try:
opath = PATH_CACHE[(realpath_basedir, subdir)]
except KeyError:
opath = PATH_CACHE[(realpath_basedir, subdir)] = os.path.join(realpath_basedir, subdir)
if opath in NAK:
continue
key = '%s.%s' % (entity_name, opath)
if key in FOUND:
data = self.load_found_files(loader, data, FOUND[key])
continue
else:
opath = PATH_CACHE[(realpath_basedir, subdir)] = os.path.join(realpath_basedir, subdir)
# compute the cache key here as well; FOUND[key] is assigned below even when cache=False
key = '%s.%s' % (entity_name, opath)
if os.path.isdir(opath):
self._display.debug("\tprocessing dir %s" % opath)
FOUND[key] = found_files = loader.find_vars_files(opath, entity_name)
elif not os.path.exists(opath):
# cache missing dirs so we don't have to keep looking for things beneath them
NAK.add(opath)
else:
self._display.warning("Found %s that is not a directory, skipping: %s" % (subdir, opath))
# cache non-directory matches
NAK.add(opath)
data = self.load_found_files(loader, data, found_files)
except Exception as e:
raise AnsibleParserError(to_native(e))
return data
| 6,284
|
Python
|
.py
| 133
| 35.323308
| 136
| 0.589204
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,368
|
__init__.py
|
ansible_ansible/lib/ansible/plugins/vars/__init__.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.plugins import AnsiblePlugin
from ansible.utils.path import basedir
from ansible.utils.display import Display
display = Display()
class BaseVarsPlugin(AnsiblePlugin):
"""
Loads variables for groups and/or hosts
"""
is_stateless = False
def __init__(self):
""" constructor """
super(BaseVarsPlugin, self).__init__()
self._display = display
def get_vars(self, loader, path, entities):
""" Gets variables. """
self._basedir = basedir(path)
| 1,331
|
Python
|
.py
| 34
| 36.058824
| 70
| 0.735659
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,369
|
su.py
|
ansible_ansible/lib/ansible/plugins/become/su.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: su
short_description: Substitute User
description:
- This become plugin allows your remote/login user to execute commands as another user via the su utility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
default: root
ini:
- section: privilege_escalation
key: become_user
- section: su_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_su_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_SU_USER
keyword:
- name: become_user
become_exe:
description: Su executable
default: su
ini:
- section: privilege_escalation
key: become_exe
- section: su_become_plugin
key: executable
vars:
- name: ansible_become_exe
- name: ansible_su_exe
env:
- name: ANSIBLE_BECOME_EXE
- name: ANSIBLE_SU_EXE
keyword:
- name: become_exe
become_flags:
description: Options to pass to su
default: ''
ini:
- section: privilege_escalation
key: become_flags
- section: su_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_su_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_SU_FLAGS
keyword:
- name: become_flags
become_pass:
description: Password to pass to su
required: False
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_su_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_SU_PASS
ini:
- section: su_become_plugin
key: password
prompt_l10n:
description:
- List of localized strings to match for prompt detection
- If empty we'll use the built in one
- Do NOT add a colon (:) to your custom entries. Ansible adds a colon at the end of each prompt;
if you add another one in your string, your prompt will fail with a "Timeout" error.
default: []
type: list
elements: string
ini:
- section: su_become_plugin
key: localized_prompts
vars:
- name: ansible_su_prompt_l10n
env:
- name: ANSIBLE_SU_PROMPT_L10N
"""
import re
import shlex
from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'su'
# messages for detecting prompted password issues
fail = ('Authentication failure',)
SU_PROMPT_LOCALIZATIONS = [
'Password',
'암호',
'パスワード',
'Adgangskode',
'Contraseña',
'Contrasenya',
'Hasło',
'Heslo',
'Jelszó',
'Lösenord',
'Mật khẩu',
'Mot de passe',
'Parola',
'Parool',
'Pasahitza',
'Passord',
'Passwort',
'Salasana',
'Sandi',
'Senha',
'Wachtwoord',
'ססמה',
'Лозинка',
'Парола',
'Пароль',
'गुप्तशब्द',
'शब्दकूट',
'సంకేతపదము',
'හස්පදය',
'密码',
'密碼',
'口令',
]
def check_password_prompt(self, b_output):
""" checks if the expected password prompt exists in b_output """
prompts = self.get_option('prompt_l10n') or self.SU_PROMPT_LOCALIZATIONS
b_password_string = b"|".join((br'(\w+\'s )?' + to_bytes(p)) for p in prompts)
# Colon or unicode fullwidth colon
b_password_string = b_password_string + to_bytes(u' ?(:|:) ?')
b_su_prompt_localizations_re = re.compile(b_password_string, flags=re.IGNORECASE)
return bool(b_su_prompt_localizations_re.match(b_output))
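# Illustrative prompts the compiled pattern matches (assumed): b'Password:',
# b"root's Password:", or a localized variant such as the Spanish 'Contraseña:'.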
def build_become_command(self, cmd, shell):
super(BecomeModule, self).build_become_command(cmd, shell)
# Prompt handling for ``su`` is more complicated, this
# is used to satisfy the connection plugin
self.prompt = True
if not cmd:
return cmd
exe = self.get_option('become_exe') or self.name
flags = self.get_option('become_flags') or ''
user = self.get_option('become_user') or ''
success_cmd = self._build_success_command(cmd, shell)
return "%s %s %s -c %s" % (exe, flags, user, shlex.quote(success_cmd))
| 5,326
|
Python
|
.py
| 152
| 23.421053
| 114
| 0.537118
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,370
|
sudo.py
|
ansible_ansible/lib/ansible/plugins/become/sudo.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: sudo
short_description: Substitute User DO
description:
- This become plugin allows your remote/login user to execute commands as another user via the sudo utility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
default: root
ini:
- section: privilege_escalation
key: become_user
- section: sudo_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_sudo_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_SUDO_USER
keyword:
- name: become_user
become_exe:
description: Sudo executable
default: sudo
ini:
- section: privilege_escalation
key: become_exe
- section: sudo_become_plugin
key: executable
vars:
- name: ansible_become_exe
- name: ansible_sudo_exe
env:
- name: ANSIBLE_BECOME_EXE
- name: ANSIBLE_SUDO_EXE
keyword:
- name: become_exe
become_flags:
description: Options to pass to sudo
default: -H -S -n
ini:
- section: privilege_escalation
key: become_flags
- section: sudo_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_sudo_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_SUDO_FLAGS
keyword:
- name: become_flags
become_pass:
description: Password to pass to sudo
required: False
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_sudo_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_SUDO_PASS
ini:
- section: sudo_become_plugin
key: password
"""
import re
import shlex
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'sudo'
# messages for detecting prompted password issues
fail = ('Sorry, try again.',)
missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
def build_become_command(self, cmd, shell):
super(BecomeModule, self).build_become_command(cmd, shell)
if not cmd:
return cmd
becomecmd = self.get_option('become_exe') or self.name
flags = self.get_option('become_flags') or ''
prompt = ''
if self.get_option('become_pass'):
self.prompt = '[sudo via ansible, key=%s] password:' % self._id
if flags: # this could be simplified, but kept as is for now for backwards string matching
reflag = []
for flag in shlex.split(flags):
if flag in ('-n', '--non-interactive'):
continue
elif not flag.startswith('--'):
# handle -XnxxX flags only
flag = re.sub(r'^(-\w*)n(\w*.*)', r'\1\2', flag)
reflag.append(flag)
flags = shlex.join(reflag)
prompt = '-p "%s"' % (self.prompt)
user = self.get_option('become_user') or ''
if user:
user = '-u %s' % (user)
return ' '.join([becomecmd, flags, prompt, user, self._build_success_command(cmd, shell)])
| 3,982
|
Python
|
.py
| 106
| 25.471698
| 116
| 0.529518
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,371
|
__init__.py
|
ansible_ansible/lib/ansible/plugins/become/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import shlex
from abc import abstractmethod
from secrets import choice
from string import ascii_lowercase
from gettext import dgettext
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins import AnsiblePlugin
def _gen_id(length=32):
""" return random string used to identify the current privilege escalation """
return ''.join(choice(ascii_lowercase) for x in range(length))
class BecomeBase(AnsiblePlugin):
name = None # type: str | None
# messages for detecting prompted password issues
fail = tuple() # type: tuple[str, ...]
missing = tuple() # type: tuple[str, ...]
# many connection plugins cannot provide tty, set to True if your become
# plugin requires a tty, e.g. su
require_tty = False
# prompt to match
prompt = ''
def __init__(self):
super(BecomeBase, self).__init__()
self._id = ''
self.success = ''
def get_option(self, option, hostvars=None, playcontext=None):
""" Overrides the base get_option to provide a fallback to playcontext vars in case a 3rd party plugin did not
implement the base become options required in Ansible. """
# TODO: add deprecation warning for ValueError in devel that removes the playcontext fallback
try:
return super(BecomeBase, self).get_option(option, hostvars=hostvars)
except KeyError:
pc_fallback = ['become_user', 'become_pass', 'become_flags', 'become_exe']
if option not in pc_fallback:
raise
return getattr(playcontext, option, None)
def expect_prompt(self):
"""This function assists connection plugins in determining if they need to wait for
a prompt. Both a prompt and a password are required.
"""
return self.prompt and self.get_option('become_pass')
def _build_success_command(self, cmd, shell, noexe=False):
if not all((cmd, shell, self.success)):
return cmd
try:
cmd = shlex.quote('%s %s %s %s' % (shell.ECHO, self.success, shell.COMMAND_SEP, cmd))
except AttributeError:
# TODO: This should probably become some more robust functionality used to detect incompat
raise AnsibleError('The %s shell family is incompatible with the %s become plugin' % (shell.SHELL_FAMILY, self.name))
exe = getattr(shell, 'executable', None)
if exe and not noexe:
cmd = '%s -c %s' % (exe, cmd)
return cmd
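# Illustrative (assumed): with an sh-family shell, cmd 'whoami' becomes roughly
#   /bin/sh -c 'echo BECOME-SUCCESS-<random id> ; whoami'
# so check_success() can detect that privilege escalation actually worked.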
@abstractmethod
def build_become_command(self, cmd, shell):
self._id = _gen_id()
self.success = 'BECOME-SUCCESS-%s' % self._id
def check_success(self, b_output):
b_success = to_bytes(self.success)
return any(b_success in l.rstrip() for l in b_output.splitlines(True))
def check_password_prompt(self, b_output):
""" checks if the expected password prompt exists in b_output """
if self.prompt:
b_prompt = to_bytes(self.prompt).strip()
return any(l.strip().startswith(b_prompt) for l in b_output.splitlines())
return False
def _check_password_error(self, b_out, msg):
""" returns True/False if domain specific i18n version of msg is found in b_out """
b_fail = to_bytes(dgettext(self.name, msg))
return b_fail and b_fail in b_out
def check_incorrect_password(self, b_output):
for errstring in self.fail:
if self._check_password_error(b_output, errstring):
return True
return False
def check_missing_password(self, b_output):
for errstring in self.missing:
if self._check_password_error(b_output, errstring):
return True
return False
| 4,009
|
Python
|
.py
| 84
| 39.928571
| 129
| 0.659405
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,372
|
runas.py
|
ansible_ansible/lib/ansible/plugins/become/runas.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: runas
short_description: Run As user
description:
- This become plugin allows your remote/login user to execute commands as another user via the windows runas facility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
ini:
- section: privilege_escalation
key: become_user
- section: runas_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_runas_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_RUNAS_USER
keyword:
- name: become_user
required: True
become_flags:
description: Options to pass to runas, a space delimited list of k=v pairs
default: ''
ini:
- section: privilege_escalation
key: become_flags
- section: runas_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_runas_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_RUNAS_FLAGS
keyword:
- name: become_flags
become_pass:
description: password
ini:
- section: runas_become_plugin
key: password
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_runas_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_RUNAS_PASS
notes:
- runas is really implemented in the powershell module handler and as such can only be used with winrm connections.
- This plugin ignores the 'become_exe' setting as it uses an API and not an executable.
- The Secondary Logon service (seclogon) must be running to use runas
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'runas'
def build_become_command(self, cmd, shell):
# this is a noop, the 'real' runas is implemented
# inside the windows powershell execution subsystem
return cmd
| 2,564
|
Python
|
.py
| 68
| 26.794118
| 126
| 0.57751
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,373
|
template.py
|
ansible_ansible/lib/ansible/plugins/action/template.py
|
# Copyright: (c) 2015, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import shutil
import stat
import tempfile
from jinja2.defaults import (
BLOCK_END_STRING,
BLOCK_START_STRING,
COMMENT_END_STRING,
COMMENT_START_STRING,
VARIABLE_END_STRING,
VARIABLE_START_STRING,
)
from ansible import constants as C
from ansible.config.manager import ensure_type
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
from ansible.template import generate_ansible_template_vars, AnsibleEnvironment
class ActionModule(ActionBase):
TRANSFERS_FILES = True
DEFAULT_NEWLINE_SEQUENCE = "\n"
def run(self, tmp=None, task_vars=None):
""" handler for template operations """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# Options type validation
# strings
for s_type in ('src', 'dest', 'state', 'newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
'block_end_string', 'comment_start_string', 'comment_end_string'):
if s_type in self._task.args:
value = ensure_type(self._task.args[s_type], 'string')
if value is not None and not isinstance(value, string_types):
raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
self._task.args[s_type] = value
# booleans
try:
follow = boolean(self._task.args.get('follow', False), strict=False)
trim_blocks = boolean(self._task.args.get('trim_blocks', True), strict=False)
lstrip_blocks = boolean(self._task.args.get('lstrip_blocks', False), strict=False)
except TypeError as e:
raise AnsibleActionFail(to_native(e))
# assign to local vars for ease of use
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
state = self._task.args.get('state', None)
newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
variable_start_string = self._task.args.get('variable_start_string', VARIABLE_START_STRING)
variable_end_string = self._task.args.get('variable_end_string', VARIABLE_END_STRING)
block_start_string = self._task.args.get('block_start_string', BLOCK_START_STRING)
block_end_string = self._task.args.get('block_end_string', BLOCK_END_STRING)
comment_start_string = self._task.args.get('comment_start_string', COMMENT_START_STRING)
comment_end_string = self._task.args.get('comment_end_string', COMMENT_END_STRING)
output_encoding = self._task.args.get('output_encoding', 'utf-8') or 'utf-8'
wrong_sequences = ["\\n", "\\r", "\\r\\n"]
allowed_sequences = ["\n", "\r", "\r\n"]
# We need to convert unescaped sequences to proper escaped sequences for Jinja2
if newline_sequence in wrong_sequences:
newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
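# e.g. (illustrative) a playbook value of '\\n' (backslash + n, two characters)
# is mapped to the real newline character here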
try:
# logical validation
if state is not None:
raise AnsibleActionFail("'state' cannot be specified on a template")
elif source is None or dest is None:
raise AnsibleActionFail("src and dest are required")
elif newline_sequence not in allowed_sequences:
raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
else:
try:
source = self._find_needle('templates', source)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
mode = self._task.args.get('mode', None)
if mode == 'preserve':
mode = '0%03o' % stat.S_IMODE(os.stat(source).st_mode)
# Get vault decrypted tmp file
try:
tmp_source = self._loader.get_real_file(source)
except AnsibleFileNotFound as e:
raise AnsibleActionFail("could not find src=%s, %s" % (source, to_text(e)))
b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
# template the source data locally & get ready to transfer
try:
with open(b_tmp_source, 'rb') as f:
try:
template_data = to_text(f.read(), errors='surrogate_or_strict')
except UnicodeError:
raise AnsibleActionFail("Template source files must be utf-8 encoded")
# set jinja2 internal search path for includes
searchpath = task_vars.get('ansible_search_path', [])
searchpath.extend([self._loader._basedir, os.path.dirname(source)])
# We want to search into the 'templates' subdir of each search path in
# addition to our original search paths.
newsearchpath = []
for p in searchpath:
newsearchpath.append(os.path.join(p, 'templates'))
newsearchpath.append(p)
searchpath = newsearchpath
# add ansible 'template' vars
temp_vars = task_vars.copy()
# NOTE in the case of ANSIBLE_DEBUG=1 task_vars is VarsWithSources(MutableMapping)
# so | operator cannot be used as it can be used only on dicts
# https://peps.python.org/pep-0584/#what-about-mapping-and-mutablemapping
temp_vars.update(generate_ansible_template_vars(self._task.args.get('src', None), source, dest))
# force templar to use AnsibleEnvironment to prevent issues with native types
# https://github.com/ansible/ansible/issues/46169
templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment,
searchpath=searchpath,
newline_sequence=newline_sequence,
available_variables=temp_vars)
overrides = dict(
block_start_string=block_start_string,
block_end_string=block_end_string,
variable_start_string=variable_start_string,
variable_end_string=variable_end_string,
comment_start_string=comment_start_string,
comment_end_string=comment_end_string,
trim_blocks=trim_blocks,
lstrip_blocks=lstrip_blocks
)
resultant = templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False, overrides=overrides)
except AnsibleAction:
raise
except Exception as e:
raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
finally:
self._loader.cleanup_tmp_file(b_tmp_source)
new_task = self._task.copy()
# mode is either the mode from task.args or the mode of the source file if the task.args
# mode == 'preserve'
new_task.args['mode'] = mode
# remove 'template only' options:
for remove in ('newline_sequence', 'block_start_string', 'block_end_string', 'variable_start_string', 'variable_end_string',
'comment_start_string', 'comment_end_string', 'trim_blocks', 'lstrip_blocks', 'output_encoding'):
new_task.args.pop(remove, None)
local_tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
try:
result_file = os.path.join(local_tempdir, os.path.basename(source))
with open(to_bytes(result_file, errors='surrogate_or_strict'), 'wb') as f:
f.write(to_bytes(resultant, encoding=output_encoding, errors='surrogate_or_strict'))
new_task.args.update(
dict(
src=result_file,
dest=dest,
follow=follow,
),
)
# call with ansible.legacy prefix to eliminate collisions with collections while still allowing local override
copy_action = self._shared_loader_obj.action_loader.get('ansible.legacy.copy',
task=new_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj)
result.update(copy_action.run(task_vars=task_vars))
finally:
shutil.rmtree(to_bytes(local_tempdir, errors='surrogate_or_strict'))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| 9,979
|
Python
|
.py
| 170
| 42.676471
| 142
| 0.5792
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,374
|
include_vars.py
|
ansible_ansible/lib/ansible/plugins/action/include_vars.py
|
# Copyright: (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from os import path, walk
import re
import pathlib
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.plugins.action import ActionBase
from ansible.utils.vars import combine_vars
class ActionModule(ActionBase):
TRANSFERS_FILES = False
VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions', 'ignore_unknown_extensions']
VALID_FILE_ARGUMENTS = ['file', '_raw_params']
VALID_ALL = ['name', 'hash_behaviour']
_requires_connection = False
def _set_dir_defaults(self):
if not self.depth:
self.depth = 0
if self.files_matching:
self.matcher = re.compile(r'{0}'.format(self.files_matching))
else:
self.matcher = None
if not self.ignore_files:
self.ignore_files = list()
if isinstance(self.ignore_files, string_types):
self.ignore_files = self.ignore_files.split()
elif isinstance(self.ignore_files, dict):
return {
'failed': True,
'message': '{0} must be a list'.format(self.ignore_files)
}
def _set_args(self):
""" Set instance variables based on the arguments that were passed """
self.hash_behaviour = self._task.args.get('hash_behaviour', None)
self.return_results_as_name = self._task.args.get('name', None)
self.source_dir = self._task.args.get('dir', None)
self.source_file = self._task.args.get('file', None)
if not self.source_dir and not self.source_file:
self.source_file = self._task.args.get('_raw_params')
if self.source_file:
self.source_file = self.source_file.rstrip('\n')
self.depth = self._task.args.get('depth', None)
self.files_matching = self._task.args.get('files_matching', None)
self.ignore_unknown_extensions = self._task.args.get('ignore_unknown_extensions', False)
self.ignore_files = self._task.args.get('ignore_files', None)
self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
# convert/validate extensions list
if isinstance(self.valid_extensions, string_types):
# split a delimited string instead of exploding it into single characters
# (assumed comma- or whitespace-separated input)
self.valid_extensions = [ext.strip() for ext in self.valid_extensions.replace(',', ' ').split()]
if not isinstance(self.valid_extensions, list):
raise AnsibleError('Invalid type for "extensions" option, it must be a list')
def run(self, tmp=None, task_vars=None):
""" Load yml files recursively from a directory.
"""
del tmp # tmp no longer has any effect
if task_vars is None:
task_vars = dict()
self.show_content = True
self.included_files = []
# Validate arguments
dirs = 0
files = 0
for arg in self._task.args:
if arg in self.VALID_DIR_ARGUMENTS:
dirs += 1
elif arg in self.VALID_FILE_ARGUMENTS:
files += 1
elif arg in self.VALID_ALL:
pass
else:
raise AnsibleError('{0} is not a valid option in include_vars'.format(to_native(arg)))
if dirs and files:
raise AnsibleError("You are mixing file only and dir only arguments, these are incompatible")
# set internal vars from args
self._set_args()
results = dict()
failed = False
if self.source_dir:
self._set_dir_defaults()
self._set_root_dir()
if not path.exists(self.source_dir):
failed = True
err_msg = ('{0} directory does not exist'.format(to_native(self.source_dir)))
elif not path.isdir(self.source_dir):
failed = True
err_msg = ('{0} is not a directory'.format(to_native(self.source_dir)))
else:
for root_dir, filenames in self._traverse_dir_depth():
failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
if failed:
break
results.update(updated_results)
else:
try:
self.source_file = self._find_needle('vars', self.source_file)
failed, err_msg, updated_results = (
self._load_files(self.source_file)
)
if not failed:
results.update(updated_results)
except AnsibleError as e:
failed = True
err_msg = to_native(e)
if self.return_results_as_name:
scope = dict()
scope[self.return_results_as_name] = results
results = scope
result = super(ActionModule, self).run(task_vars=task_vars)
if failed:
result['failed'] = failed
result['message'] = err_msg
elif self.hash_behaviour is not None and self.hash_behaviour != C.DEFAULT_HASH_BEHAVIOUR:
merge_hashes = self.hash_behaviour == 'merge'
for key, value in results.items():
old_value = task_vars.get(key, None)
results[key] = combine_vars(old_value, value, merge=merge_hashes)
result['ansible_included_var_files'] = self.included_files
result['ansible_facts'] = results
result['_ansible_no_log'] = not self.show_content
return result
def _set_root_dir(self):
if self._task._role:
if self.source_dir.split('/')[0] == 'vars':
path_to_use = (
path.join(self._task._role._role_path, self.source_dir)
)
if path.exists(path_to_use):
self.source_dir = path_to_use
else:
path_to_use = (
path.join(
self._task._role._role_path, 'vars', self.source_dir
)
)
self.source_dir = path_to_use
else:
if hasattr(self._task._ds, '_data_source'):
current_dir = (
"/".join(self._task._ds._data_source.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
def _log_walk(self, error):
self._display.vvv('Issue with walking through "%s": %s' % (to_native(error.filename), to_native(error)))
def _traverse_dir_depth(self):
""" Recursively iterate over a directory and sort the files in
alphabetical order. Do not iterate past the set depth.
The default depth is unlimited.
"""
sorted_walk = list(walk(self.source_dir, onerror=self._log_walk, followlinks=True))
sorted_walk.sort(key=lambda x: x[0])
for current_root, current_dir, current_files in sorted_walk:
# Depth 1 is the root, relative_to omits the root
current_depth = len(pathlib.Path(current_root).relative_to(self.source_dir).parts) + 1
if self.depth != 0 and current_depth > self.depth:
continue
current_files.sort()
yield (current_root, current_files)
def _ignore_file(self, filename):
""" Return True if a file matches the list of ignore_files.
Args:
filename (str): The filename that is being matched against.
Returns:
Boolean
"""
for file_type in self.ignore_files:
try:
if re.search(r'{0}$'.format(file_type), filename):
return True
except Exception:
err_msg = 'Invalid regular expression: {0}'.format(file_type)
raise AnsibleError(err_msg)
return False
def _is_valid_file_ext(self, source_file):
""" Verify if source file has a valid extension
Args:
source_file (str): The full path of source file or source file.
Returns:
Bool
"""
file_ext = path.splitext(source_file)
return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
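# Illustrative (assumed): 'vars/web.yml' splits to ('vars/web', '.yml'), 'yml'
# is in VALID_FILE_EXTENSIONS, so this returns True; 'README' returns False.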
def _load_files(self, filename, validate_extensions=False):
""" Loads a file and converts the output into a valid Python dict.
Args:
filename (str): The source file.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
if validate_extensions and not self._is_valid_file_ext(filename):
failed = True
err_msg = ('{0} does not have a valid extension: {1}'.format(to_native(filename), ', '.join(self.valid_extensions)))
else:
b_data, show_content = self._loader._get_file_contents(filename)
data = to_text(b_data, errors='surrogate_or_strict')
self.show_content &= show_content # mask all results if any file was encrypted
data = self._loader.load(data, file_name=filename, show_content=show_content)
if not data:
data = dict()
if not isinstance(data, dict):
failed = True
err_msg = ('{0} must be stored as a dictionary/hash'.format(to_native(filename)))
else:
self.included_files.append(filename)
results.update(data)
return failed, err_msg, results
def _load_files_in_dir(self, root_dir, var_files):
""" Load the found yml files and update/overwrite the dictionary.
Args:
root_dir (str): The base directory of the list of files that is being passed.
var_files: (list): List of files to iterate over and load into a dictionary.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
for filename in var_files:
stop_iter = False
# Never include main.yml from a role, as that is the default included by the role
if self._task._role:
if path.join(self._task._role._role_path, filename) == path.join(root_dir, 'vars', 'main.yml'):
stop_iter = True
continue
filepath = path.join(root_dir, filename)
if self.files_matching:
if not self.matcher.search(filename):
stop_iter = True
if not stop_iter and not failed:
if self.ignore_unknown_extensions:
if path.exists(filepath) and not self._ignore_file(filename) and self._is_valid_file_ext(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
else:
if path.exists(filepath) and not self._ignore_file(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
return failed, err_msg, results
| 11,573
|
Python
|
.py
| 246
| 34.756098
| 128
| 0.575873
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,375
|
normal.py
|
ansible_ansible/lib/ansible/plugins/action/normal.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
_supports_check_mode = True
_supports_async = True
def run(self, tmp=None, task_vars=None):
# individual modules might disagree, but as the generic action plugin we pass at this point.
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
wrap_async = self._task.async_val and not self._connection.has_native_async
# do work!
result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))
# hack to keep --verbose from showing all the setup module result
# moved from setup module as now we filter out all _ansible_ from result
if self._task.action in C._ACTION_SETUP:
result['_ansible_verbose_override'] = True
if not wrap_async:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| 1,854
|
Python
|
.py
| 38
| 43.921053
| 101
| 0.725208
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,376
|
command.py
|
ansible_ansible/lib/ansible/plugins/action/command.py
|
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
self._supports_async = True
results = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
wrap_async = self._task.async_val and not self._connection.has_native_async
# explicitly call `ansible.legacy.command` for backcompat to allow library/ override of `command` while not allowing
# collections search for an unqualified `command` module
results = merge_hash(results, self._execute_module(module_name='ansible.legacy.command', task_vars=task_vars, wrap_async=wrap_async))
if not wrap_async:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return results
| 1,069
|
Python
|
.py
| 18
| 52.388889
| 141
| 0.716203
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,377
|
raw.py
|
ansible_ansible/lib/ansible/plugins/action/raw.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
if self._task.environment and any(self._task.environment):
self._display.warning('raw module does not support the environment keyword')
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.check_mode:
# in --check mode, always skip this module execution
result['skipped'] = True
return result
executable = self._task.args.get('executable', False)
result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))
result['changed'] = True
if 'rc' in result and result['rc'] != 0:
result['failed'] = True
result['msg'] = 'non-zero return code'
return result
| 1,762
|
Python
|
.py
| 38
| 40.552632
| 113
| 0.694104
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,378
|
fetch.py
|
ansible_ansible/lib/ansible/plugins/action/fetch.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import base64
from ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe, is_subpath
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
""" handler for fetch operations """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
try:
if self._task.check_mode:
raise AnsibleActionSkip('check mode not (yet) supported for this module')
source = self._task.args.get('src', None)
original_dest = dest = self._task.args.get('dest', None)
flat = boolean(self._task.args.get('flat'), strict=False)
fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)
msg = ''
# validate source and dest are strings FIXME: use basic.py and module specs
if not isinstance(source, string_types):
msg = "Invalid type supplied for source option, it must be a string"
if not isinstance(dest, string_types):
msg = "Invalid type supplied for dest option, it must be a string"
if source is None or dest is None:
msg = "src and dest are required"
if msg:
raise AnsibleActionFail(msg)
source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source)
remote_stat = {}
remote_checksum = None
if not self._connection.become:
# Get checksum for the remote file. Don't bother if using become as slurp will be used.
# Follow symlinks because fetch always follows symlinks
try:
remote_stat = self._execute_remote_stat(source, all_vars=task_vars, follow=True)
except AnsibleConnectionFailure:
raise
except AnsibleError as ae:
result['changed'] = False
result['file'] = source
if fail_on_missing:
result['failed'] = True
result['msg'] = to_text(ae)
else:
result['msg'] = "%s, ignored" % to_text(ae, errors='surrogate_or_replace')
return result
remote_checksum = remote_stat.get('checksum')
if remote_stat.get('exists'):
if remote_stat.get('isdir'):
result['failed'] = True
result['changed'] = False
result['msg'] = "remote file is a directory, fetch cannot work on directories"
# Historically, these don't fail because you may want to transfer
# a log file that possibly MAY exist but keep going to fetch other
# log files. Today, this is better achieved by adding
# ignore_errors or failed_when to the task. Control the behaviour
                    # via fail_on_missing
if not fail_on_missing:
result['msg'] += ", not transferring, ignored"
del result['changed']
del result['failed']
return result
# use slurp if permissions are lacking or privilege escalation is needed
remote_data = None
if remote_checksum in (None, '1', ''):
slurpres = self._execute_module(module_name='ansible.legacy.slurp', module_args=dict(src=source), task_vars=task_vars)
if slurpres.get('failed'):
if not fail_on_missing:
result['file'] = source
result['changed'] = False
else:
result.update(slurpres)
if 'not found' in slurpres.get('msg', ''):
result['msg'] = "the remote file does not exist, not transferring, ignored"
elif slurpres.get('msg', '').startswith('source is a directory'):
result['msg'] = "remote file is a directory, fetch cannot work on directories"
return result
else:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
remote_checksum = checksum_s(remote_data)
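            # Illustration (hedged, hypothetical payload): a successful slurp result
            # looks roughly like {'content': '<base64-encoded bytes>', 'encoding': 'base64'},
            # so the remote checksum can be computed locally from the decoded data
            # without another round-trip.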
# calculate the destination name
if os.path.sep not in self._connection._shell.join_path('a', ''):
source = self._connection._shell._unquote(source)
source_local = source.replace('\\', '/')
else:
source_local = source
# ensure we only use file name, avoid relative paths
if not is_subpath(dest, original_dest):
# TODO: ? dest = os.path.expanduser(dest.replace(('../','')))
raise AnsibleActionFail("Detected directory traversal, expected to be contained in '%s' but got '%s'" % (original_dest, dest))
if flat:
if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
raise AnsibleActionFail("dest is an existing directory, use a trailing slash if you want to fetch src into that directory")
if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')):
raise AnsibleActionFail(
f"calculated dest '{dest}' is an existing directory, use another path that does not point to an existing directory")
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = self._loader.path_dwim(dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
if 'inventory_hostname' in task_vars:
target_name = task_vars['inventory_hostname']
else:
target_name = self._play_context.remote_addr
dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = os.path.normpath(dest)
# calculate checksum for the local file
local_checksum = checksum(dest)
if remote_checksum != local_checksum:
# create the containing directories, if needed
makedirs_safe(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
self._connection.fetch_file(source, dest)
else:
try:
with open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') as f:
f.write(remote_data)
except (IOError, OSError) as e:
raise AnsibleActionFail("Failed to fetch the file: %s" % e)
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled systems
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
result.update(dict(failed=True, md5sum=new_md5,
msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
checksum=new_checksum, remote_checksum=remote_checksum))
else:
result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
'remote_md5sum': None, 'checksum': new_checksum,
'remote_checksum': remote_checksum})
else:
# For backwards compatibility. We'll return None on FIPS enabled systems
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| 10,165 | Python | .py | 179 | 40.748603 | 144 | 0.564899 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,379 | group_by.py | ansible_ansible/lib/ansible/plugins/action/group_by.py |
# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
class ActionModule(ActionBase):
""" Create inventory groups based on variables """
# We need to be able to modify the inventory
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('key', 'parents'))
_requires_connection = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if 'key' not in self._task.args:
result['failed'] = True
result['msg'] = "the 'key' param is required when using group_by"
return result
group_name = self._task.args.get('key')
parent_groups = self._task.args.get('parents', ['all'])
if isinstance(parent_groups, string_types):
parent_groups = [parent_groups]
result['changed'] = False
result['add_group'] = group_name.replace(' ', '-')
result['parent_groups'] = [name.replace(' ', '-') for name in parent_groups]
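        # Illustration (hypothetical value): key='os_{{ ansible_facts.distribution }}'
        # rendering to 'os_Fedora Linux' becomes the group 'os_Fedora-Linux'.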
return result
| 1,894 | Python | .py | 42 | 39.785714 | 84 | 0.687466 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,380 | pause.py | ansible_ansible/lib/ansible/plugins/action/pause.py |
# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import datetime
import time
from ansible.errors import AnsibleError, AnsiblePromptInterrupt, AnsiblePromptNoninteractive
from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
""" pauses execution for a length or time, or until input is received """
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=None):
""" run the pause action module """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
validation_result, new_module_args = self.validate_argument_spec(
argument_spec={
'echo': {'type': 'bool', 'default': True},
                'minutes': {'type': int},  # don't break backwards compat: use the int callable so floats are still accepted
                'seconds': {'type': int},  # don't break backwards compat: use the int callable so floats are still accepted
'prompt': {'type': 'str'},
},
mutually_exclusive=(
('minutes', 'seconds'),
),
)
duration_unit = 'minutes'
prompt = None
seconds = None
echo = new_module_args['echo']
echo_prompt = ''
result.update(dict(
changed=False,
rc=0,
stderr='',
stdout='',
start=None,
stop=None,
delta=None,
echo=echo
))
# Add a note saying the output is hidden if echo is disabled
if not echo:
echo_prompt = ' (output is hidden)'
if new_module_args['prompt']:
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), new_module_args['prompt'], echo_prompt)
else:
# If no custom prompt is specified, set a default prompt
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)
if new_module_args['minutes'] is not None:
seconds = new_module_args['minutes'] * 60
elif new_module_args['seconds'] is not None:
seconds = new_module_args['seconds']
duration_unit = 'seconds'
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = to_text(datetime.datetime.now())
result['user_input'] = b''
default_input_complete = None
if seconds is not None:
if seconds < 1:
seconds = 1
# show the timer and control prompts
display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
# show the prompt specified in the task
if new_module_args['prompt']:
display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r")
else:
# corner case where enter does not continue, wait for timeout/interrupt only
prompt = "(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"
# don't complete on LF/CR; we expect a timeout/interrupt and ignore user input when a pause duration is specified
default_input_complete = tuple()
# Only echo input if no timeout is specified
echo = seconds is None and echo
user_input = b''
try:
_user_input = display.prompt_until(prompt, private=not echo, seconds=seconds, complete_input=default_input_complete)
except AnsiblePromptInterrupt:
user_input = None
except AnsiblePromptNoninteractive:
if seconds is None:
display.warning("Not waiting for response to prompt as stdin is not interactive")
else:
# wait specified duration
time.sleep(seconds)
else:
if seconds is None:
user_input = _user_input
# user interrupt
if user_input is None:
prompt = "Press 'C' to continue the play or 'A' to abort \r"
try:
user_input = display.prompt_until(prompt, private=not echo, interrupt_input=(b'a',), complete_input=(b'c',))
except AnsiblePromptInterrupt:
raise AnsibleError('user requested abort!')
duration = time.time() - start
result['stop'] = to_text(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
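        # Illustration (hypothetical values): a pause started with minutes=2 that
        # ran its full course reports delta=120 and stdout 'Paused for 2.0 minutes'.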
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
result['user_input'] = to_text(user_input, errors='surrogate_or_strict')
return result
| 5,674 | Python | .py | 124 | 36.08871 | 129 | 0.60123 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,381 | dnf.py | ansible_ansible/lib/ansible/plugins/action/dnf.py |
# Copyright: (c) 2023, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.errors import AnsibleActionFail
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
VALID_BACKENDS = frozenset(("yum", "yum4", "dnf", "dnf4", "dnf5"))
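# Illustration (hypothetical task): `dnf: name=vim use_backend=dnf5` runs the
# dnf5 backend directly, while the default 'auto' resolves the backend from the
# pkg_mgr fact as implemented below.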
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# Carry-over concept from the package action plugin
if 'use' in self._task.args and 'use_backend' in self._task.args:
raise AnsibleActionFail("parameters are mutually exclusive: ('use', 'use_backend')")
module = self._task.args.get('use', self._task.args.get('use_backend', 'auto'))
if module in {'yum', 'auto'}:
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template("{{ansible_facts.pkg_mgr}}")
except Exception:
pass # could not get it from template!
if module not in VALID_BACKENDS:
facts = self._execute_module(
module_name="ansible.legacy.setup", module_args=dict(filter="ansible_pkg_mgr", gather_subset="!all"),
task_vars=task_vars)
if facts.get("failed", False):
raise AnsibleActionFail(
f"Failed to fetch ansible_pkg_mgr to determine the dnf action backend: {facts.get('msg')}",
result=facts,
)
display.debug("Facts %s" % facts)
module = facts.get("ansible_facts", {}).get("ansible_pkg_mgr", "auto")
if (not self._task.delegate_to or self._task.delegate_facts) and module != 'auto':
result['ansible_facts'] = {'pkg_mgr': module}
if module not in VALID_BACKENDS:
result.update(
{
'failed': True,
'msg': ("Could not detect which major revision of dnf is in use, which is required to determine module backend.",
"You should manually specify use_backend to tell the module whether to use the dnf4 or dnf5 backend})"),
}
)
else:
if module in {"yum4", "dnf4"}:
module = "dnf"
# eliminate collisions with collections search while still allowing local override
module = 'ansible.legacy.' + module
if not self._shared_loader_obj.module_loader.has_plugin(module):
result.update({'failed': True, 'msg': "Could not find a dnf module backend for %s." % module})
else:
new_module_args = self._task.args.copy()
if 'use_backend' in new_module_args:
del new_module_args['use_backend']
if 'use' in new_module_args:
del new_module_args['use']
display.vvvv("Running %s as the backend for the dnf action plugin" % module)
result.update(self._execute_module(
module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
return result
| 3,667 | Python | .py | 65 | 43.353846 | 133 | 0.590173 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,382 | fail.py | ansible_ansible/lib/ansible/plugins/action/fail.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
""" Fail with custom message """
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('msg',))
_requires_connection = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
msg = 'Failed as requested from task'
if self._task.args and 'msg' in self._task.args:
msg = self._task.args.get('msg')
result['failed'] = True
result['msg'] = msg
return result
| 1,457 | Python | .py | 35 | 37.314286 | 70 | 0.70297 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,383 | copy.py | ansible_ansible/lib/ansible/plugins/action/copy.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Toshio Kuratomi <tkuraotmi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import json
import os
import os.path
import stat
import tempfile
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
# Supplement the FILE_COMMON_ARGUMENTS with arguments that are specific to file
REAL_FILE_ARGS = frozenset(FILE_COMMON_ARGUMENTS.keys()).union(
('state', 'path', '_original_basename', 'recurse', 'force',
'_diff_peek', 'src'))
def _create_remote_file_args(module_args):
"""remove keys that are not relevant to file"""
return dict((k, v) for k, v in module_args.items() if k in REAL_FILE_ARGS)
def _create_remote_copy_args(module_args):
"""remove action plugin only keys"""
return dict((k, v) for k, v in module_args.items() if k not in ('content', 'decrypt'))
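# Illustration (hypothetical args): for {'src': 'a', 'dest': 'b', 'content': None,
# 'mode': '0644'}, _create_remote_copy_args drops the action-plugin-only keys
# ('content', 'decrypt'), while _create_remote_file_args keeps only the keys
# listed in REAL_FILE_ARGS (here 'src' and 'mode').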
def _walk_dirs(topdir, base_path=None, local_follow=False, trailing_slash_detector=None):
"""
Walk a filesystem tree returning enough information to copy the files
:arg topdir: The directory that the filesystem tree is rooted at
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
:kwarg local_follow: Whether to follow symlinks on the source. When set
to False, no symlinks are dereferenced. When set to True (the
default), the code will dereference most symlinks. However, symlinks
can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
:returns: dictionary of tuples. All of the path elements in the structure are text strings.
This separates all the files, directories, and symlinks along with
important information about each::
{ 'files': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
'directories': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
'symlinks': [('/symlink/target/path', 'relative/path/to/copy/to'), ...],
}
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced.
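    For illustration (hypothetical layout), walking ``/src`` that contains
    ``a.txt`` and ``sub/b.txt`` with no symlinks would return roughly::

        {'files': [(u'/src/a.txt', u'src/a.txt'), (u'/src/sub/b.txt', u'src/sub/b.txt')],
         'directories': [(u'/src/sub', u'src/sub')],
         'symlinks': []}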
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u''):
"""
        This is a closure (function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
:arg parent_dirs: Directories that we're copying that this directory is in.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
                    # Dereference the symlink
real_file = os.path.realpath(filepath)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append((real_file, dest_filepath))
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append((os.readlink(filepath), dest_filepath))
else:
# Just a normal file
r_files['files'].append((filepath, dest_filepath))
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
# This was a circular symlink. So add it as
# a symlink
r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append((real_dir, dest_dirpath))
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents), rel_base=dest_dirpath)
else:
# Add the symlink to the destination
r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
else:
# Just a normal directory
r_files['directories'].append((dirpath, dest_dirpath))
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
        r_files['symlinks'] = [(os.readlink(topdir), os.path.basename(topdir))]  # list of tuples, matching what callers unpack
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents)
return r_files
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def _ensure_invocation(self, result):
# NOTE: adding invocation arguments here needs to be kept in sync with
# any no_log specified in the argument_spec in the module.
# This is not automatic.
# NOTE: do not add to this. This should be made a generic function for action plugins.
# This should also use the same argspec as the module instead of keeping it in sync.
if 'invocation' not in result:
if self._task.no_log:
result['invocation'] = "CENSORED: no_log is set"
else:
# NOTE: Should be removed in the future. For now keep this broken
# behaviour, have a look in the PR 51582
result['invocation'] = self._task.args.copy()
result['invocation']['module_args'] = self._task.args.copy()
if isinstance(result['invocation'], dict):
if 'content' in result['invocation']:
result['invocation']['content'] = 'CENSORED: content is a no_log parameter'
if result['invocation'].get('module_args', {}).get('content') is not None:
result['invocation']['module_args']['content'] = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
return result
def _copy_file(self, source_full, source_rel, content, content_tempfile,
dest, task_vars, follow):
decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
force = boolean(self._task.args.get('force', 'yes'), strict=False)
raw = boolean(self._task.args.get('raw', 'no'), strict=False)
result = {}
result['diff'] = []
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source_full, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
return result
# Get the local mode and set if user wanted it preserved
# https://github.com/ansible/ansible-modules-core/issues/1124
lmode = None
if self._task.args.get('mode', None) == 'preserve':
lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)
# This is kind of optimization - if user told us destination is
# dir, do path manipulation right away, otherwise we still check
# for dest being a dir via remote call below.
if self._connection._shell.path_has_trailing_slash(dest):
dest_file = self._connection._shell.join_path(dest, source_rel)
else:
dest_file = dest
# Attempt to get remote file info
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)
if dest_status['exists'] and dest_status['isdir']:
# The dest is a directory.
if content is not None:
# If source was defined as content remove the temporary file and fail out.
self._remove_tempfile_if_content_defined(content, content_tempfile)
result['failed'] = True
result['msg'] = "can not use content with a dir as dest"
return result
else:
# Append the relative source location to the destination and get remote stats again
dest_file = self._connection._shell.join_path(dest, source_rel)
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)
if dest_status['exists'] and not force:
# remote_file exists so continue to next iteration.
return None
# Generate a hash of the local file.
local_checksum = checksum(source_full)
if local_checksum != dest_status['checksum']:
# The checksums don't match and we will change or error out.
if self._task.diff and not raw:
result['diff'].append(self._get_diff_data(dest_file, source_full, task_vars, content))
if self._task.check_mode:
self._remove_tempfile_if_content_defined(content, content_tempfile)
result['changed'] = True
return result
# Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, '.source')
# ensure we keep suffix for validate
suffix = os.path.splitext(dest_file)[1]
if suffix:
tmp_src += suffix
remote_path = None
if not raw:
remote_path = self._transfer_file(source_full, tmp_src)
else:
self._transfer_file(source_full, dest_file)
# We have copied the file remotely and no longer require our content_tempfile
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
# FIXME: I don't think this is needed when PIPELINING=0 because the source is created
# world readable. Access to the directory itself is controlled via fixup_perms2() as
# part of executing the module. Check that umask with scp/sftp/piped doesn't cause
# a problem before acting on this idea. (This idea would save a round-trip)
# fix file permissions when the copy is done as a different user
if remote_path:
self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
if raw:
# Continue to next iteration if raw is defined.
return None
# Run the copy module
# src and dest here come after original and override them
# we pass dest only to make sure it includes trailing slash in case of recursive copy
new_module_args = _create_remote_copy_args(self._task.args)
new_module_args.update(
dict(
src=tmp_src,
dest=dest,
_original_basename=source_rel,
follow=follow
)
)
if not self._task.args.get('checksum'):
new_module_args['checksum'] = local_checksum
if lmode:
new_module_args['mode'] = lmode
module_return = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
else:
# no need to transfer the file, already correct hash, but still need to call
# the file module in case we want to change attributes
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
if raw:
return None
# Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
# If checksums match, and follow = True, find out if 'dest' is a link. If so,
# change it to point to the source of the link.
if follow:
dest_status_nofollow = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=False)
if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow.keys():
dest = dest_status_nofollow['lnk_source']
# Build temporary module_args.
new_module_args = _create_remote_file_args(self._task.args)
new_module_args.update(
dict(
dest=dest,
_original_basename=source_rel,
recurse=False,
state='file',
)
)
# src is sent to the file module in _original_basename, not in src
try:
del new_module_args['src']
except KeyError:
pass
if lmode:
new_module_args['mode'] = lmode
# Execute the file module.
module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
if not module_return.get('checksum'):
module_return['checksum'] = local_checksum
result.update(module_return)
return result
def _create_content_tempfile(self, content):
""" Create a tempfile containing defined content """
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP, prefix='.')
content = to_bytes(content)
try:
with os.fdopen(fd, 'wb') as f:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
return content_tempfile
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
def run(self, tmp=None, task_vars=None):
""" handler for file transfer operations """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
local_follow = boolean(self._task.args.get('local_follow', True), strict=False)
result['failed'] = True
if not source and content is None:
result['msg'] = 'src (or content) is required'
elif not dest:
result['msg'] = 'dest is required'
elif source and content is not None:
result['msg'] = 'src and content are mutually exclusive'
elif content is not None and dest is not None and dest.endswith("/"):
result['msg'] = "can not use content with a dir as dest"
else:
del result['failed']
if result.get('failed'):
return self._ensure_invocation(result)
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
# If content is defined make a tmp file and write the content into it.
if content is not None:
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content temp file: %s" % to_native(err)
return self._ensure_invocation(result)
        # if remote_src is set, the remote host does the copy itself, so just
        # hand the task over to the copy module
elif remote_src:
result.update(self._execute_module(module_name='ansible.legacy.copy', task_vars=task_vars))
return self._ensure_invocation(result)
else:
# find_needle returns a path that may not have a trailing slash on
# a directory so we need to determine that now (we use it just
# like rsync does to figure out whether to include the directory
            # or only the files inside the directory)
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return self._ensure_invocation(result)
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = {'files': [], 'directories': [], 'symlinks': []}
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, local_follow=local_follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash)
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = self._connection._shell.join_path(dest, '')
# FIXME: Can we optimize cases where there's only one file, no
# symlinks and any number of directories? In the original code,
# empty directories are not copied....
else:
source_files['files'] = [(source, os.path.basename(source))]
changed = False
module_return = dict(changed=False)
# A register for if we executed a module.
# Used to cut down on command calls when not recursive.
module_executed = False
# expand any user home dir specifier
dest = self._remote_expand_user(dest)
implicit_directories = set()
for source_full, source_rel in source_files['files']:
# copy files over. This happens first as directories that have
# a file do not need to be created later
# We only follow symlinks for files in the non-recursive case
if source_files['directories']:
follow = False
else:
follow = boolean(self._task.args.get('follow', False), strict=False)
module_return = self._copy_file(source_full, source_rel, content, content_tempfile, dest, task_vars, follow)
if module_return is None:
continue
if module_return.get('failed'):
result.update(module_return)
return self._ensure_invocation(result)
paths = os.path.split(source_rel)
dir_path = ''
for dir_component in paths:
                dir_path = os.path.join(dir_path, dir_component)  # accumulate the path so implicit parent dirs are recorded
implicit_directories.add(dir_path)
if 'diff' in result and not result['diff']:
del result['diff']
module_executed = True
changed = changed or module_return.get('changed', False)
for src, dest_path in source_files['directories']:
# Find directories that are leaves as they might not have been
# created yet.
if dest_path in implicit_directories:
continue
# Use file module to create these
new_module_args = _create_remote_file_args(self._task.args)
new_module_args['path'] = os.path.join(dest, dest_path)
new_module_args['state'] = 'directory'
new_module_args['mode'] = self._task.args.get('directory_mode', None)
new_module_args['recurse'] = False
del new_module_args['src']
module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
if module_return.get('failed'):
result.update(module_return)
return self._ensure_invocation(result)
module_executed = True
changed = changed or module_return.get('changed', False)
for target_path, dest_path in source_files['symlinks']:
# Copy symlinks over
new_module_args = _create_remote_file_args(self._task.args)
new_module_args['path'] = os.path.join(dest, dest_path)
new_module_args['src'] = target_path
new_module_args['state'] = 'link'
new_module_args['force'] = True
# Only follow remote symlinks in the non-recursive case
if source_files['directories']:
new_module_args['follow'] = False
            # the file module cannot deal with 'preserve' mode, and mode is
            # meaningless for symlinks anyway, so just don't pass it
if new_module_args.get('mode', None) == 'preserve':
new_module_args.pop('mode')
module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
module_executed = True
if module_return.get('failed'):
result.update(module_return)
return self._ensure_invocation(result)
changed = changed or module_return.get('changed', False)
if module_executed and len(source_files['files']) == 1:
result.update(module_return)
# the file module returns the file path as 'path', but
# the copy module uses 'dest', so add it if it's not there
if 'path' in result and 'dest' not in result:
result['dest'] = result['path']
else:
result.update(dict(dest=dest, src=source, changed=changed))
# Delete tmp path
self._remove_tmp_path(self._connection._shell.tmpdir)
return self._ensure_invocation(result)
| 27,422 | Python | .py | 496 | 42.282258 | 133 | 0.599732 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,384 | assert.py | ansible_ansible/lib/ansible/plugins/action/assert.py |
# Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
class ActionModule(ActionBase):
""" Fail with custom message """
_requires_connection = False
_VALID_ARGS = frozenset(('fail_msg', 'msg', 'quiet', 'success_msg', 'that'))
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if 'that' not in self._task.args:
raise AnsibleError('conditional required in "that" string')
fail_msg = None
success_msg = None
fail_msg = self._task.args.get('fail_msg', self._task.args.get('msg'))
if fail_msg is None:
fail_msg = 'Assertion failed'
elif isinstance(fail_msg, list):
if not all(isinstance(x, string_types) for x in fail_msg):
raise AnsibleError('Type of one of the elements in fail_msg or msg list is not string type')
elif not isinstance(fail_msg, (string_types, list)):
raise AnsibleError('Incorrect type for fail_msg or msg, expected a string or list and got %s' % type(fail_msg))
success_msg = self._task.args.get('success_msg')
if success_msg is None:
success_msg = 'All assertions passed'
elif isinstance(success_msg, list):
if not all(isinstance(x, string_types) for x in success_msg):
raise AnsibleError('Type of one of the elements in success_msg list is not string type')
elif not isinstance(success_msg, (string_types, list)):
raise AnsibleError('Incorrect type for success_msg, expected a string or list and got %s' % type(success_msg))
quiet = boolean(self._task.args.get('quiet', False), strict=False)
# make sure the 'that' items are a list
thats = self._task.args['that']
if not isinstance(thats, list):
thats = [thats]
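        # Illustration (hypothetical task): that: ["1 == 1", "'a' in 'abc'"] is
        # evaluated one expression at a time below, so the first failing
        # assertion is the one reported back.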
# Now we iterate over the that items, temporarily assigning them
# to the task's when value so we can evaluate the conditional using
# the built in evaluate function. The when has already been evaluated
# by this point, and is not used again, so we don't care about mangling
# that value now
cond = Conditional(loader=self._loader)
if not quiet:
result['_ansible_verbose_always'] = True
for that in thats:
cond.when = [that]
test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
if not test_result:
result['failed'] = True
result['evaluated_to'] = test_result
result['assertion'] = that
result['msg'] = fail_msg
return result
result['changed'] = False
result['msg'] = success_msg
return result
| 3,844 | Python | .py | 76 | 42.552632 | 123 | 0.664 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,385 | add_host.py | ansible_ansible/lib/ansible/plugins/action/add_host.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from collections.abc import Mapping
from ansible.errors import AnsibleActionFail
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
from ansible.parsing.utils.addresses import parse_address
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
class ActionModule(ActionBase):
""" Create inventory hosts and groups in the memory inventory"""
# We need to be able to modify the inventory
BYPASS_HOST_LOOP = True
_requires_connection = False
_supports_check_mode = True
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
args = self._task.args
raw = args.pop('_raw_params', {})
if isinstance(raw, Mapping):
# TODO: create 'conflict' detection in base class to deal with repeats and aliases and warn user
args = combine_vars(raw, args)
else:
raise AnsibleActionFail('Invalid raw parameters passed, requires a dictionary/mapping got a %s' % type(raw))
# Parse out any hostname:port patterns
new_name = args.get('name', args.get('hostname', args.get('host', None)))
if new_name is None:
raise AnsibleActionFail('name, host or hostname needs to be provided')
display.vv("creating host via 'add_host': hostname=%s" % new_name)
try:
name, port = parse_address(new_name, allow_ranges=False)
except Exception:
# not a parsable hostname, but might still be usable
name = new_name
port = None
if port:
args['ansible_ssh_port'] = port
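        # Illustration (hypothetical value): name='web01:2222' parses to
        # name='web01' with port 2222, which is then carried as ansible_ssh_port.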
groups = args.get('groupname', args.get('groups', args.get('group', '')))
# add it to the group if that was specified
new_groups = []
if groups:
if isinstance(groups, list):
group_list = groups
elif isinstance(groups, string_types):
group_list = groups.split(",")
else:
raise AnsibleActionFail("Groups must be specified as a list.", obj=self._task)
for group_name in group_list:
if group_name not in new_groups:
new_groups.append(group_name.strip())
# Add any variables to the new_host
host_vars = dict()
special_args = frozenset(('name', 'hostname', 'groupname', 'groups'))
for k in args.keys():
if k not in special_args:
host_vars[k] = args[k]
result['changed'] = False
result['add_host'] = dict(host_name=name, groups=new_groups, host_vars=host_vars)
return result
| 3,579 | Python | .py | 77 | 38.961039 | 121 | 0.664466 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,386 | __init__.py | ansible_ansible/lib/ansible/plugins/action/__init__.py |
# coding: utf-8
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import base64
import json
import os
import re
import secrets
import shlex
import stat
import tempfile
from abc import ABC, abstractmethod
from collections.abc import Sequence
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail, AnsibleAuthenticationFailure
from ansible.executor.module_common import modify_module
from ansible.executor.interpreter_discovery import discover_interpreter, InterpreterDiscoveryRequiredError
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
from ansible.module_utils.errors import UnsupportedError
from ansible.module_utils.json_utils import _filter_non_json_lines
from ansible.module_utils.six import binary_type, string_types, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.parsing.utils.jsonify import jsonify
from ansible.release import __version__
from ansible.utils.collection_loader import resource_from_fqcr
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import wrap_var, AnsibleUnsafeText
from ansible.vars.clean import remove_internal_keys
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
def _validate_utf8_json(d):
if isinstance(d, text_type):
# Purposefully not using to_bytes here for performance reasons
d.encode(encoding='utf-8', errors='strict')
elif isinstance(d, dict):
for o in d.items():
_validate_utf8_json(o)
elif isinstance(d, (list, tuple)):
for o in d:
_validate_utf8_json(o)
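# Illustration (hypothetical value): _validate_utf8_json({'k': '\udcff'}) raises
# UnicodeEncodeError, because a lone surrogate cannot be encoded as strict UTF-8.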
class ActionBase(ABC):
"""
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
"""
# A set of valid arguments
_VALID_ARGS = frozenset([]) # type: frozenset[str]
# behavioral attributes
BYPASS_HOST_LOOP = False
TRANSFERS_FILES = False
_requires_connection = True
_supports_check_mode = True
_supports_async = False
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
self._cleanup_remote_tmp = False
# interpreter discovery state
self._discovered_interpreter_key = None
self._discovered_interpreter = False
self._discovery_deprecation_warnings = []
self._discovery_warnings = []
self._used_interpreter = None
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
another one and wants to use the same remote tmp for both should set
self._connection._shell.tmpdir rather than this parameter.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementers of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
# does not default to {'changed': False, 'failed': False}, as it breaks async
result = {}
if tmp is not None:
result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir']
del tmp
if self._task.async_val and not self._supports_async:
raise AnsibleActionFail('This action (%s) does not support async.' % self._task.action)
elif self._task.check_mode and not self._supports_check_mode:
raise AnsibleActionSkip('This action (%s) does not support check mode.' % self._task.action)
# Error if invalid argument is passed
if self._VALID_ARGS:
task_opts = frozenset(self._task.args.keys())
bad_opts = task_opts.difference(self._VALID_ARGS)
if bad_opts:
raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts))))
if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
self._make_tmp_path()
return result
def validate_argument_spec(self, argument_spec=None,
mutually_exclusive=None,
required_together=None,
required_one_of=None,
required_if=None,
required_by=None,
):
"""Validate an argument spec against the task args
This will return a tuple of (ValidationResult, dict) where the dict
is the validated, coerced, and normalized task args.
        Be cautious when passing ``new_module_args`` directly to a
module invocation, as it will contain the defaults, and not only
the args supplied from the task. If you do this, the module
should not define ``mutually_exclusive`` or similar.
This code is roughly copied from the ``validate_argument_spec``
action plugin for use by other action plugins.
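        A minimal sketch (hypothetical spec and caller)::

            validation_result, new_args = self.validate_argument_spec(
                argument_spec={'state': {'type': 'str', 'default': 'present'}},
            )
            # new_args now holds the coerced task args plus the default for 'state'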
"""
new_module_args = self._task.args.copy()
validator = ArgumentSpecValidator(
argument_spec,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_one_of=required_one_of,
required_if=required_if,
required_by=required_by,
)
validation_result = validator.validate(new_module_args)
new_module_args.update(validation_result.validated_parameters)
try:
error = validation_result.errors[0]
except IndexError:
error = None
# Fail for validation errors, even in check mode
if error:
msg = validation_result.errors.msg
if isinstance(error, UnsupportedError):
msg = f"Unsupported parameters for ({self._load_name}) module: {msg}"
raise AnsibleActionFail(msg)
return validation_result, new_module_args
def cleanup(self, force=False):
"""Method to perform a clean up at the end of an action plugin execution
By default this is designed to clean up the shell tmpdir, and is toggled based on whether
async is in use
Action plugins may override this if they deem necessary, but should still call this method
via super
"""
if force or not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
def get_plugin_option(self, plugin, option, default=None):
"""Helper to get an option from a plugin without having to use
the try/except dance everywhere to set a default
"""
try:
return plugin.get_option(option)
except (AttributeError, KeyError):
return default
def get_become_option(self, option, default=None):
return self.get_plugin_option(self._connection.become, option, default=default)
def get_connection_option(self, option, default=None):
return self.get_plugin_option(self._connection, option, default=default)
def get_shell_option(self, option, default=None):
return self.get_plugin_option(self._connection._shell, option, default=default)
def _remote_file_exists(self, path):
cmd = self._connection._shell.exists(path)
result = self._low_level_execute_command(cmd=cmd, sudoable=True)
if result['rc'] == 0:
return True
return False
def _configure_module(self, module_name, module_args, task_vars):
"""
Handles the loading and templating of the module code through the
modify_module() function.
"""
if self._task.delegate_to:
use_vars = task_vars.get('ansible_delegated_vars')[self._task.delegate_to]
else:
use_vars = task_vars
split_module_name = module_name.split('.')
collection_name = '.'.join(split_module_name[0:2]) if len(split_module_name) > 2 else ''
leaf_module_name = resource_from_fqcr(module_name)
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# FIXME: This should be temporary and moved to an exec subsystem plugin where we can define the mapping
# for each subsystem.
win_collection = 'ansible.windows'
rewrite_collection_names = ['ansible.builtin', 'ansible.legacy', '']
                # async_status, win_stat, win_file, win_copy, and win_ping are not
                # exact equivalents of their python counterparts, but they are
                # compatible enough for our internal usage
# NB: we only rewrite the module if it's not being called by the user (eg, an action calling something else)
# and if it's unqualified or FQ to a builtin
if leaf_module_name in ('stat', 'file', 'copy', 'ping') and \
collection_name in rewrite_collection_names and self._task.action != module_name:
module_name = '%s.win_%s' % (win_collection, leaf_module_name)
elif leaf_module_name == 'async_status' and collection_name in rewrite_collection_names:
module_name = '%s.%s' % (win_collection, leaf_module_name)
# TODO: move this tweak down to the modules, not extensible here
# Remove extra quotes surrounding path parameters before sending to module.
if leaf_module_name in ['win_stat', 'win_file', 'win_copy', 'slurp'] and module_args and \
hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
result = self._shared_loader_obj.module_loader.find_plugin_with_context(module_name, mod_type, collection_list=self._task.collections)
if not result.resolved:
if result.redirect_list and len(result.redirect_list) > 1:
# take the last one in the redirect list, we may have successfully jumped through N other redirects
target_module_name = result.redirect_list[-1]
raise AnsibleError("The module {0} was redirected to {1}, which could not be loaded.".format(module_name, target_module_name))
module_path = result.plugin_resolved_path
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
# insert shared code and arguments into the module
final_environment = dict()
self._compute_environment_string(final_environment)
become_kwargs = {}
if self._connection.become:
become_kwargs['become'] = True
become_kwargs['become_method'] = self._connection.become.name
become_kwargs['become_user'] = self._connection.become.get_option('become_user',
playcontext=self._play_context)
become_kwargs['become_password'] = self._connection.become.get_option('become_pass',
playcontext=self._play_context)
become_kwargs['become_flags'] = self._connection.become.get_option('become_flags',
playcontext=self._play_context)
# modify_module will exit early if interpreter discovery is required; re-run after if necessary
for dummy in (1, 2):
try:
(module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar,
task_vars=use_vars,
module_compression=C.config.get_config_value('DEFAULT_MODULE_COMPRESSION',
variables=task_vars),
async_timeout=self._task.async_val,
environment=final_environment,
remote_is_local=bool(getattr(self._connection, '_remote_is_local', False)),
**become_kwargs)
break
except InterpreterDiscoveryRequiredError as idre:
self._discovered_interpreter = AnsibleUnsafeText(discover_interpreter(
action=self,
interpreter_name=idre.interpreter_name,
discovery_mode=idre.discovery_mode,
task_vars=use_vars))
# update the local task_vars with the discovered interpreter (which might be None);
# we'll propagate back to the controller in the task result
discovered_key = 'discovered_interpreter_%s' % idre.interpreter_name
# update the local vars copy for the retry
use_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
# TODO: this condition prevents 'wrong host' from being updated
# but in future we would want to be able to update 'delegated host facts'
# irrespective of task settings
if not self._task.delegate_to or self._task.delegate_facts:
# store in local task_vars facts collection for the retry and any other usages in this worker
task_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
# preserve this so _execute_module can propagate back to controller as a fact
self._discovered_interpreter_key = discovered_key
else:
task_vars['ansible_delegated_vars'][self._task.delegate_to]['ansible_facts'][discovered_key] = self._discovered_interpreter
return (module_style, module_shebang, module_data, module_path)
def _compute_environment_string(self, raw_environment_out=None):
"""
Builds the environment string to be used when executing the remote task.
"""
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [environments]
# The order of environments matters: we merge in the parent's
# values first, so that values set in the block, and then the
# task, 'win' in precedence
for environment in environments:
if environment is None or len(environment) == 0:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
if len(final_environment) > 0:
final_environment = self._templar.template(final_environment)
if isinstance(raw_environment_out, dict):
raw_environment_out.clear()
raw_environment_out.update(final_environment)
return self._connection._shell.env_prefix(**final_environment)
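# As a minimal sketch of the merge order above (values are hypothetical):
#
#     block_env = {'http_proxy': 'http://proxy.example.com:3128'}
#     task_env = {'http_proxy': '', 'LC_ALL': 'C'}
#     final_environment = {}
#     for environment in (block_env, task_env):  # parent first, task last
#         final_environment.update(environment)
#     # final_environment == {'http_proxy': '', 'LC_ALL': 'C'} -- the task wins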
def _early_needs_tmp_path(self):
"""
Determines if a tmp path should be created before the action is executed.
"""
return getattr(self, 'TRANSFERS_FILES', False)
def _is_pipelining_enabled(self, module_style, wrap_async=False):
"""
Determines if we are required and can do pipelining
"""
try:
is_enabled = self._connection.get_option('pipelining')
except (KeyError, AttributeError, ValueError):
is_enabled = self._play_context.pipelining
# winrm supports async pipeline
# TODO: make other class property 'has_async_pipelining' to separate cases
always_pipeline = self._connection.always_pipeline_modules
# su does not work with pipelining
# TODO: add has_pipelining class prop to become plugins
become_exception = (self._connection.become.name if self._connection.become else '') != 'su'
# all of these conditions must be true
conditions = [
self._connection.has_pipelining, # connection class supports it
is_enabled or always_pipeline, # enabled via config or forced via connection (eg winrm)
module_style == "new", # old style modules do not support pipelining
not C.DEFAULT_KEEP_REMOTE_FILES, # user does not want to keep remote files
not wrap_async or always_pipeline, # async does not normally support pipelining unless it does (eg winrm)
become_exception,
]
return all(conditions)
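# For example (a hedged sketch; the values are hypothetical): a POSIX ssh
# connection with pipelining enabled, a 'new'-style module, keep_remote_files
# off, no async, and 'sudo' as the become method gives
#
#     conditions = [True, True, True, True, True, True]
#     all(conditions)  # -> True: the module is fed to the interpreter's stdin
#
# while flipping any single entry (e.g. become method 'su') disables
# pipelining and forces the copy-to-remote-tmpdir path instead.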
def _get_admin_users(self):
"""
Returns a list of admin users that are configured for the current shell
plugin
"""
return self.get_shell_option('admin_users', ['root'])
def _get_remote_addr(self, tvars):
""" consistently get the 'remote_address' for the action plugin """
remote_addr = tvars.get('delegated_vars', {}).get('ansible_host', tvars.get('ansible_host', tvars.get('inventory_hostname', None)))
for variation in ('remote_addr', 'host'):
try:
remote_addr = self._connection.get_option(variation)
except KeyError:
continue
break
else:
# plugin does not have, fallback to play_context
remote_addr = self._play_context.remote_addr
return remote_addr
def _get_remote_user(self):
""" consistently get the 'remote_user' for the action plugin """
# TODO: use 'current user running ansible' as fallback when moving away from play_context
# pwd.getpwuid(os.getuid()).pw_name
remote_user = None
try:
remote_user = self._connection.get_option('remote_user')
except KeyError:
# plugin does not have remote_user option, fall back to default and/or play_context
remote_user = getattr(self._connection, 'default_user', None) or self._play_context.remote_user
except AttributeError:
# plugin does not use config system, fallback to old play_context
remote_user = self._play_context.remote_user
return remote_user
def _is_become_unprivileged(self):
"""
The user is not the same as the connection user and is not part of the
shell configured admin users
"""
# if we don't use become then we know we aren't switching to a
# different unprivileged user
if not self._connection.become:
return False
# if we use become and the user is not an admin (or same user) then
# we need to return become_unprivileged as True
admin_users = self._get_admin_users()
remote_user = self._get_remote_user()
become_user = self.get_become_option('become_user')
return bool(become_user and become_user not in admin_users + [remote_user])
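# Illustrative outcomes for the check above, assuming the default
# admin_users of ['root'] (user names are hypothetical):
#
#     remote_user='alice', become_user='root'   -> False (admin target)
#     remote_user='alice', become_user='alice'  -> False (same user)
#     remote_user='alice', become_user='bob'    -> True  (unprivileged switch)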
def _make_tmp_path(self, remote_user=None):
"""
Create and return a temporary path on a remote box.
"""
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
tmpdir = C.DEFAULT_LOCAL_TMP
else:
# NOTE: shell plugins should populate this setting anyway, but they don't do remote expansion, which
# we need for 'non-POSIX' systems like cloud-init and solaris
tmpdir = self._remote_expand_user(self.get_shell_option('remote_tmp', default='~/.ansible/tmp'), sudoable=False)
become_unprivileged = self._is_become_unprivileged()
basefile = self._connection._shell._generate_temp_dir_name()
cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
result = self._low_level_execute_command(cmd, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if display.verbosity > 3:
output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
else:
output = (u'SSH encountered an unknown error during the connection. '
'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
elif u'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = ('Failed to create temporary directory. '
'In some cases, you may have been able to authenticate and did not have permissions on the target directory. '
'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp", for more error information use -vvv. '
'Failed command was: %s, exited with result %d' % (cmd, result['rc']))
if 'stdout' in result and result['stdout'] != u'':
output = output + u", stdout output: %s" % result['stdout']
if display.verbosity > 3 and 'stderr' in result and result['stderr'] != u'':
output += u", stderr output: %s" % result['stderr']
raise AnsibleConnectionFailure(output)
else:
self._cleanup_remote_tmp = True
try:
stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
self._connection._shell.tmpdir = rc
return rc
def _should_remove_tmp_path(self, tmp_path):
"""Determine if temporary path should be deleted or kept by user request/config"""
return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path
def _remove_tmp_path(self, tmp_path, force=False):
"""Remove a temporary path we created. """
if tmp_path is None and self._connection._shell.tmpdir:
tmp_path = self._connection._shell.tmpdir
if force or self._should_remove_tmp_path(tmp_path):
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working connection configuration.
# If the connection breaks we could leave tmp directories out on the remote system.
tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)
if tmp_rm_res.get('rc', 0) != 0:
display.warning('Error deleting remote temporary files (rc: %s, stderr: %s)'
% (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
else:
self._connection._shell.tmpdir = None
def _transfer_file(self, local_path, remote_path):
"""
Copy a file from the controller to a remote path
:arg local_path: Path on controller to transfer
:arg remote_path: Path on the remote system to transfer into
.. warning::
* When you use this function you likely want to use fixup_perms2() on the
remote_path to make sure that the remote file is readable when the user becomes
a non-privileged user.
* If you use fixup_perms2() on the file and copy or move the file into place, you will
need to then remove filesystem acls on the file once it has been copied into place by
the module. See how the copy module implements this for help.
"""
self._connection.put_file(local_path, remote_path)
return remote_path
def _transfer_data(self, remote_path, data):
"""
Copies the module data out to the temporary module path.
"""
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
afo = os.fdopen(afd, 'wb')
try:
data = to_bytes(data, errors='surrogate_or_strict')
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
afo.flush()
afo.close()
try:
self._transfer_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
"""
We need the files we upload to be readable (and sometimes executable)
by the user being sudo'd to but we want to limit other people's access
(because the files could contain passwords or other private
information). We achieve this in one of these ways:
* If no sudo is performed or the remote_user is sudo'ing to
themselves, we don't have to change permissions.
* If the remote_user sudo's to a privileged user (for instance, root),
we don't have to change permissions
* If the remote_user sudo's to an unprivileged user then we attempt to
grant the unprivileged user access via file system acls.
* If granting file system acls fails we try to change the owner of the
file with chown which only works in case the remote_user is
privileged or the remote systems allows chown calls by unprivileged
users (e.g. HP-UX)
* If the above fails, we next try 'chmod +a' which is a macOS way of
setting ACLs on files.
* If the above fails, we check if ansible_common_remote_group is set.
If it is, we attempt to chgrp the file to its value. This is useful
if the remote_user has a group in common with the become_user. As the
remote_user, we can chgrp the file to that group and allow the
become_user to read it.
* If (the chown fails AND ansible_common_remote_group is not set) OR
(ansible_common_remote_group is set AND the chgrp (or following chmod)
returned non-zero), we can set the file to be world readable so that
the second unprivileged user can read the file.
Since this could allow other users to get access to private
information we only do this if ansible is configured with
"allow_world_readable_tmpfiles" in the ansible.cfg. Also note that
when ansible_common_remote_group is set this final fallback is very
unlikely to ever be triggered, so long as chgrp was successful. But
just because the chgrp was successful, does not mean Ansible can
necessarily access the files (if, for example, the variable was set
to a group that remote_user is in, and can chgrp to, but does not have
in common with become_user).
"""
if remote_user is None:
remote_user = self._get_remote_user()
# Step 1: Are we on windows?
if getattr(self._connection._shell, "_IS_WINDOWS", False):
# This won't work on Powershell as-is, so we'll just completely
# skip until we have a need for it, at which point we'll have to do
# something different.
return remote_paths
# Step 2: If we're not becoming an unprivileged user, we are roughly
# done. Make the files +x if we're asked to, and return.
if not self._is_become_unprivileged():
if execute:
# Can't depend on the file being transferred with execute permissions.
# Only need user perms because no become was used here
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError(
'Failed to set execute bit on remote files '
'(rc: {0}, err: {1})'.format(
res['rc'],
to_native(res['stderr'])))
return remote_paths
# If we're still here, we have an unprivileged user that's different
# than the ssh user.
become_user = self.get_become_option('become_user')
# Try to use file system acls to make the files readable for sudo'd
# user
if execute:
chmod_mode = 'rx'
setfacl_mode = 'r-x'
# Apple patches their "file_cmds" chmod with ACL support
chmod_acl_mode = '{0} allow read,execute'.format(become_user)
# POSIX-draft ACL specification. Solaris, maybe others.
# See chmod(1) on something Solaris-based for syntax details.
posix_acl_mode = 'A+user:{0}:rx:allow'.format(become_user)
else:
chmod_mode = 'rX'
# TODO: this form fails silently on freebsd. We currently
# never call _fixup_perms2() with execute=False but if we
# start to we'll have to fix this.
setfacl_mode = 'r-X'
# Apple
chmod_acl_mode = '{0} allow read'.format(become_user)
# POSIX-draft
posix_acl_mode = 'A+user:{0}:r:allow'.format(become_user)
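# For example, with become_user 'bob' (a hypothetical name) and
# execute=True, the values computed above come out as:
#
#     chmod_mode == 'rx'
#     setfacl_mode == 'r-x'
#     chmod_acl_mode == 'bob allow read,execute'
#     posix_acl_mode == 'A+user:bob:rx:allow'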
# Step 3a: Are we able to use setfacl to add user ACLs to the file?
res = self._remote_set_user_facl(
remote_paths,
become_user,
setfacl_mode)
if res['rc'] == 0:
return remote_paths
# Step 3b: Set execute if we need to. We do this before anything else
# because some of the methods below might work but not let us set +x
# as part of them.
if execute:
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError(
'Failed to set file mode or acl on remote temporary files '
'(rc: {0}, err: {1})'.format(
res['rc'],
to_native(res['stderr'])))
# Step 3c: File system ACLs failed above; try falling back to chown.
res = self._remote_chown(remote_paths, become_user)
if res['rc'] == 0:
return remote_paths
# Check if we are an admin/root user. If we are and got here, it means
# we failed to chown as root and something weird has happened.
if remote_user in self._get_admin_users():
raise AnsibleError(
'Failed to change ownership of the temporary files Ansible needs '
'to create (via chown or setfacl), despite connecting as a privileged '
'user. The unprivileged become user would be unable to read the file.')
# Step 3d: Try macOS's special chmod + ACL
# macOS chmod's +a flag takes its own argument. As a slight hack, we
# pass that argument as the first element of remote_paths. So we end
# up running `chmod +a [that argument] [file 1] [file 2] ...`
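# e.g. for become_user 'bob' and one payload file (both hypothetical),
# the remote command is roughly (exact quoting is left to the shell plugin):
#
#     chmod +a 'bob allow read,execute' /tmp/.ansible-tmp-XXX/AnsiballZ_foo.py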
try:
res = self._remote_chmod([chmod_acl_mode] + list(remote_paths), '+a')
except AnsibleAuthenticationFailure as e:
# Solaris-based chmod will return 5 when it sees an invalid mode,
# and +a is invalid there. Because it returns 5, which is the same
# thing sshpass returns on auth failure, our sshpass code will
# assume that auth failed. If we don't handle that case here, none
# of the other logic below will get run. This is fairly hacky and a
# corner case, but probably one that shows up pretty often in
# Solaris-based environments (and possibly others).
pass
else:
if res['rc'] == 0:
return remote_paths
# Step 3e: Try Solaris/OpenSolaris/OpenIndiana-sans-setfacl chmod
# Similar to macOS above, Solaris 11.4 drops setfacl and takes file ACLs
# via chmod instead. OpenSolaris and illumos-based distros allow for
# using either setfacl or chmod, and compatibility depends on filesystem.
# It should be possible to debug this branch by installing OpenIndiana
# (use ZFS) and going unpriv -> unpriv.
res = self._remote_chmod(remote_paths, posix_acl_mode)
if res['rc'] == 0:
return remote_paths
# we'll need this down here
become_link = get_versioned_doclink('playbook_guide/playbooks_privilege_escalation.html')
# Step 3f: Common group
# Otherwise, we're a normal user. We failed to chown the paths to the
# unprivileged user, but if we have a common group with them, we should
# be able to chown it to that.
#
# Note that we have no way of knowing if this will actually work... just
# because chgrp exits successfully does not mean that Ansible will work.
# We could check if the become user is in the group, but this would
# create an extra round trip.
#
# Also note that due to the above, this can prevent the
# world_readable_temp logic below from ever getting called. We
# leave this up to the user to rectify if they have both of these
# features enabled.
group = self.get_shell_option('common_remote_group')
if group is not None:
res = self._remote_chgrp(remote_paths, group)
if res['rc'] == 0:
# warn user that something might go weirdly here.
if self.get_shell_option('world_readable_temp'):
display.warning(
'Both common_remote_group and '
'allow_world_readable_tmpfiles are set. chgrp was '
'successful, but there is no guarantee that Ansible '
'will be able to read the files after this operation, '
'particularly if common_remote_group was set to a '
'group of which the unprivileged become user is not a '
'member. In this situation, '
'allow_world_readable_tmpfiles is a no-op. See this '
'URL for more details: %s'
'#risks-of-becoming-an-unprivileged-user' % become_link)
if execute:
group_mode = 'g+rwx'
else:
group_mode = 'g+rw'
res = self._remote_chmod(remote_paths, group_mode)
if res['rc'] == 0:
return remote_paths
# Step 4: World-readable temp directory
if self.get_shell_option('world_readable_temp'):
# chown and fs acls failed -- do things this insecure way only if
# the user opted in in the config file
display.warning(
'Using world-readable permissions for temporary files Ansible '
'needs to create when becoming an unprivileged user. This may '
'be insecure. For information on securing this, see %s'
'#risks-of-becoming-an-unprivileged-user' % become_link)
res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
if res['rc'] == 0:
return remote_paths
raise AnsibleError(
'Failed to set file mode on remote files '
'(rc: {0}, err: {1})'.format(
res['rc'],
to_native(res['stderr'])))
raise AnsibleError(
'Failed to set permissions on the temporary files Ansible needs '
'to create when becoming an unprivileged user '
'(rc: %s, err: %s). For information on working around this, see %s'
'#risks-of-becoming-an-unprivileged-user' % (
res['rc'],
to_native(res['stderr']), become_link))
def _remote_chmod(self, paths, mode, sudoable=False):
"""
Issue a remote chmod command
"""
cmd = self._connection._shell.chmod(paths, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, paths, user, sudoable=False):
"""
Issue a remote chown command
"""
cmd = self._connection._shell.chown(paths, user)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chgrp(self, paths, group, sudoable=False):
"""
Issue a remote chgrp command
"""
cmd = self._connection._shell.chgrp(paths, group)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
"""
Issue a remote call to setfacl
"""
cmd = self._connection._shell.set_user_facl(paths, user, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
"""
Get information from remote file.
"""
if tmp is not None:
display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir')
del tmp # No longer used
module_args = dict(
path=path,
follow=follow,
get_checksum=checksum,
get_size=False, # ansible.windows.win_stat added this in 1.11.0
checksum_algorithm='sha1',
)
# Unknown opts are ignored as module_args could be specific for the
# module that is being executed.
mystat = self._execute_module(module_name='ansible.legacy.stat', module_args=module_args, task_vars=all_vars,
wrap_async=False, ignore_unknown_opts=True)
if mystat.get('failed'):
msg = mystat.get('module_stderr')
if not msg:
msg = mystat.get('module_stdout')
if not msg:
msg = mystat.get('msg')
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))
if not mystat['stat']['exists']:
# an empty checksum might accidentally match; '1' should never match a real checksum, and is backwards compatible
mystat['stat']['checksum'] = '1'
# happens sometimes when it is a dir and not on bsd
if 'checksum' not in mystat['stat']:
mystat['stat']['checksum'] = ''
elif not isinstance(mystat['stat']['checksum'], string_types):
raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
return mystat['stat']
def _remote_expand_user(self, path, sudoable=True, pathsep=None):
""" takes a remote path and performs tilde/$HOME expansion on the remote host """
# We only expand ~/path and ~username/path
if not path.startswith('~'):
return path
# Per Jborean, we don't have to worry about Windows as we don't have a notion of user's home
# dir there.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
become_user = self.get_become_option('become_user')
if getattr(self._connection, '_remote_is_local', False):
pass
elif sudoable and self._connection.become and become_user:
expand_path = '~%s' % become_user
else:
# use remote user instead, if none set default to current user
expand_path = '~%s' % (self._get_remote_user() or '')
# use shell to construct appropriate command and execute
cmd = self._connection._shell.expand_user(expand_path)
data = self._low_level_execute_command(cmd, sudoable=False)
try:
initial_fragment = data['stdout'].strip().splitlines()[-1]
except IndexError:
initial_fragment = None
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Try using pwd, if not, return
# the original string
cmd = self._connection._shell.pwd()
pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
if pwd:
expanded = pwd
else:
expanded = path
elif len(split_path) > 1:
expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
expanded = initial_fragment
if '..' in os.path.dirname(expanded).split('/'):
raise AnsibleError("'%s' returned an invalid relative home directory path containing '..'" % self._get_remote_addr({}))
return expanded
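# Worked example (hypothetical host state): for path '~bob/.ansible/tmp',
# split_path is ['~bob', '.ansible/tmp']; the remote shell expands '~bob'
# to e.g. '/home/bob' and join_path() yields '/home/bob/.ansible/tmp'.
# A bare '~/...' expands via the remote (or become) user's home instead.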
def _strip_success_message(self, data):
"""
Removes the BECOME-SUCCESS message from the data.
"""
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
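# Example (a sketch, with a made-up token): with become enabled, stdout
# often arrives as
#
#     'BECOME-SUCCESS-abcdefghij\n{"changed": false, ...}'
#
# and the substitution above reduces it to just the module payload:
#
#     '{"changed": false, ...}'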
def _update_module_args(self, module_name, module_args, task_vars, ignore_unknown_opts: bool = False):
# set check mode in the module arguments, if required
if self._task.check_mode:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
else:
module_args['_ansible_check_mode'] = False
# set no log in the module arguments, if required
no_target_syslog = C.config.get_config_value('DEFAULT_NO_TARGET_SYSLOG', variables=task_vars)
module_args['_ansible_no_log'] = self._task.no_log or no_target_syslog
# set debug in the module arguments, if required
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
# let module know we are in diff mode
module_args['_ansible_diff'] = self._task.diff
# let module know our verbosity
module_args['_ansible_verbosity'] = display.verbosity
# give the module information about the ansible version
module_args['_ansible_version'] = __version__
# give the module information about its name
module_args['_ansible_module_name'] = module_name
# set the syslog facility to be used in the module
module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
# let module know about filesystems that selinux treats specially
module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
# what to do when parameter values are converted to strings
module_args['_ansible_string_conversion_action'] = C.STRING_CONVERSION_ACTION
# give the module the socket for persistent connections
module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
if not module_args['_ansible_socket']:
module_args['_ansible_socket'] = task_vars.get('ansible_socket')
# make sure all commands use the designated shell executable
module_args['_ansible_shell_executable'] = self._play_context.executable
# make sure modules are aware if they need to keep the remote files
module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
# make sure all commands use the designated temporary directory if created
if self._is_become_unprivileged(): # force fallback on remote_tmp as user cannot normally write to dir
module_args['_ansible_tmpdir'] = None
else:
module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
# make sure the remote_tmp value is sent through in case modules need to create their own
module_args['_ansible_remote_tmp'] = self.get_shell_option('remote_tmp', default='~/.ansible/tmp')
# tells the module to ignore options that are not in its argspec.
module_args['_ansible_ignore_unknown_opts'] = ignore_unknown_opts
# allow user to insert string to add context to remote logging
module_args['_ansible_target_log_info'] = C.config.get_config_value('TARGET_LOG_INFO', variables=task_vars)
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False,
ignore_unknown_opts: bool = False):
"""
Transfer and run a module along with its arguments.
"""
if tmp is not None:
display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
' should set self._connection._shell.tmpdir to share the tmpdir')
del tmp # No longer used
if delete_remote_tmp is not None:
display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
' Action plugins should check self._connection._shell.tmpdir to'
' see if a tmpdir existed before they were called to determine'
' if they are responsible for removing it.')
del delete_remote_tmp # No longer used
tmpdir = self._connection._shell.tmpdir
# We set the module_style to new here so the remote_tmp is created
# before the module args are built if remote_tmp is needed (async).
# If the module_style turns out to not be new and we didn't create the
# remote tmp here, it will still be created. This must be done before
# calling self._update_module_args() so the module wrapper has the
# correct remote_tmp value set
if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
if task_vars is None:
task_vars = dict()
# if a module name was not specified for this execution, use the action from the task
if module_name is None:
module_name = self._task.action
if module_args is None:
module_args = self._task.args
self._update_module_args(module_name, module_args, task_vars, ignore_unknown_opts=ignore_unknown_opts)
remove_async_dir = None
if wrap_async or self._task.async_val:
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
remove_async_dir = len(self._task.environment)
self._task.environment.append({"ANSIBLE_ASYNC_DIR": async_dir})
# FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
(module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
display.vvv("Using module file %s" % module_path)
if not shebang and module_style != 'binary':
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
self._used_interpreter = shebang
remote_module_path = None
if not self._is_pipelining_enabled(module_style, wrap_async):
# we might need remote tmp dir
if tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
remote_module_filename = self._connection._shell.get_remote_filename(module_path)
remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)
args_file_path = None
if module_style in ('old', 'non_native_want_json', 'binary'):
# we'll also need a tmp file to hold our module arguments
args_file_path = self._connection._shell.join_path(tmpdir, 'args')
if remote_module_path or module_style != 'new':
display.debug("transferring module to remote %s" % remote_module_path)
if module_style == 'binary':
self._transfer_file(module_path, remote_module_path)
else:
self._transfer_data(remote_module_path, module_data)
if module_style == 'old':
# we need to dump the module args to a k=v string in a file on
# the remote system, which can be read and parsed by the module
args_data = ""
for k, v in module_args.items():
args_data += '%s=%s ' % (k, shlex.quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style in ('non_native_want_json', 'binary'):
self._transfer_data(args_file_path, json.dumps(module_args))
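# For module_args {'path': '/tmp/x', 'state': 'touch'} the transferred args
# file would contain, per style (a sketch; quoting comes from shlex.quote):
#
#     old:                   path=/tmp/x state=touch
#     non_native_want_json:  {"path": "/tmp/x", "state": "touch"}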
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
# remove the ANSIBLE_ASYNC_DIR env entry if we added a temporary one for
# the async_wrapper task.
if remove_async_dir is not None:
del self._task.environment[remove_async_dir]
remote_files = []
if tmpdir and remote_module_path:
remote_files = [tmpdir, remote_module_path]
if args_file_path:
remote_files.append(args_file_path)
sudoable = True
in_data = None
cmd = ""
if wrap_async and not self._connection.always_pipeline_modules:
# configure, upload, and chmod the async_wrapper module
(async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(
module_name='ansible.legacy.async_wrapper', module_args=dict(), task_vars=task_vars)
async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
self._transfer_data(remote_async_module_path, async_module_data)
remote_files.append(remote_async_module_path)
async_limit = self._task.async_val
async_jid = f'j{secrets.randbelow(999999999999)}'
# call the interpreter for async_wrapper directly
# this permits use of a script for an interpreter on non-Linux platforms
interpreter = shebang.replace('#!', '').strip()
async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
if environment_string:
async_cmd.insert(0, environment_string)
if args_file_path:
async_cmd.append(args_file_path)
else:
# maintain a fixed number of positional parameters for async_wrapper
async_cmd.append('_')
if not self._should_remove_tmp_path(tmpdir):
async_cmd.append("-preserve_tmp")
cmd = " ".join(to_text(x) for x in async_cmd)
else:
if self._is_pipelining_enabled(module_style):
in_data = module_data
display.vvv("Pipelining is enabled.")
else:
cmd = remote_module_path
cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()
# Fix permissions of the tmpdir path and tmpdir files. This should be called after all
# files have been transferred.
if remote_files:
# remove none/empty
remote_files = [x for x in remote_files if x]
self._fixup_perms2(remote_files, self._get_remote_user())
# actually execute
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
# parse the main result
data = self._parse_returned_data(res)
# NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
# get internal info before cleaning
if data.pop("_ansible_suppress_tmpdir_delete", False):
self._cleanup_remote_tmp = False
# NOTE: dnf returns results .. but that made it 'compatible' with squashing, so we allow mappings, for now
if 'results' in data and (not isinstance(data['results'], Sequence) or isinstance(data['results'], string_types)):
data['ansible_module_results'] = data['results']
del data['results']
display.warning("Found internal 'results' key in module return, renamed to 'ansible_module_results'.")
# remove internal keys
remove_internal_keys(data)
if wrap_async:
# async_wrapper will clean up its tmpdir on its own so we want the controller side to
# forget about it now
self._connection._shell.tmpdir = None
# FIXME: for backwards compat, figure out if still makes sense
data['changed'] = True
# pre-split stdout/stderr into lines if needed
if 'stdout' in data and 'stdout_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stdout', None) or u''
data['stdout_lines'] = txt.splitlines()
if 'stderr' in data and 'stderr_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stderr', None) or u''
data['stderr_lines'] = txt.splitlines()
# propagate interpreter discovery results back to the controller
if self._discovered_interpreter_key:
if data.get('ansible_facts') is None:
data['ansible_facts'] = {}
data['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter
if self._discovery_warnings:
if data.get('warnings') is None:
data['warnings'] = []
data['warnings'].extend(self._discovery_warnings)
if self._discovery_deprecation_warnings:
if data.get('deprecations') is None:
data['deprecations'] = []
data['deprecations'].extend(self._discovery_deprecation_warnings)
# mark the entire module results untrusted as a template right here, since the current action could
# possibly template one of these values.
data = wrap_var(data)
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
def _parse_returned_data(self, res):
try:
filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''), objects_only=True)
for w in warnings:
display.warning(w)
data = json.loads(filtered_output)
if C.MODULE_STRICT_UTF8_RESPONSE and not data.pop('_ansible_trusted_utf8', None):
try:
_validate_utf8_json(data)
except UnicodeEncodeError:
# When removing this, also remove the loop and latin-1 from ansible.module_utils.common.text.converters.jsonify
display.deprecated(
f'Module "{self._task.resolved_action or self._task.action}" returned non UTF-8 data in '
'the JSON response. This will become an error in the future',
version='2.18',
)
data['_ansible_parsed'] = True
except ValueError:
# not valid json, lets try to capture error
data = dict(failed=True, _ansible_parsed=False)
data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
data['module_stderr'] = res['stderr']
if res['stderr'].startswith(u'Traceback'):
data['exception'] = res['stderr']
# in some cases a traceback will arrive on stdout instead of stderr, such as when using ssh with -tt
if 'exception' not in data and data['module_stdout'].startswith(u'Traceback'):
data['exception'] = data['module_stdout']
# The default
data['msg'] = "MODULE FAILURE"
# try to figure out if we are missing interpreter
if self._used_interpreter is not None:
interpreter = re.escape(self._used_interpreter.lstrip('!#'))
match = re.compile('%s: (?:No such file or directory|not found)' % interpreter)
if match.search(data['module_stderr']) or match.search(data['module_stdout']):
data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."
# always append hint
data['msg'] += '\nSee stdout/stderr for the exact error'
if 'rc' in res:
data['rc'] = res['rc']
return data
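# Failure-path example (a sketch, assuming the shebang was
# '#!/usr/bin/python3'): for res = {'stdout': '', 'stderr':
# '/usr/bin/python3: not found', 'rc': 127} the parsed result is roughly
#
#     {'failed': True, '_ansible_parsed': False, 'module_stdout': '',
#      'module_stderr': '/usr/bin/python3: not found', 'rc': 127,
#      'msg': 'The module failed to execute correctly, you probably need '
#             'to set the interpreter.\nSee stdout/stderr for the exact error'}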
# FIXME: move to connection base
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
"""
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
:kwarg encoding_errors: If the value returned by the command isn't
utf-8 then we have to figure out how to transform it to unicode.
If the value is just going to be displayed to the user (or
discarded) then the default of 'surrogate_then_replace' is fine. If the data is
used as a key or is going to be written back out to a file
verbatim, then this won't work. May have to use some sort of
replacement strategy (python3 could use surrogateescape)
:kwarg chdir: cd into this directory before executing the command.
"""
display.debug("_low_level_execute_command(): starting")
# if not cmd:
# # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
# display.debug("_low_level_execute_command(): no command, exiting")
# return dict(stdout='', stderr='', rc=254)
if chdir:
display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)
# https://github.com/ansible/ansible/issues/68054
if executable:
self._connection._shell.executable = executable
ruser = self._get_remote_user()
buser = self.get_become_option('become_user')
if (sudoable and self._connection.become and # if sudoable and have become
resource_from_fqcr(self._connection.transport) != 'network_cli' and # if not using network_cli
(C.BECOME_ALLOW_SAME_USER or (buser != ruser or not any((ruser, buser))))): # if we allow same user PE or users are different and either is set
display.debug("_low_level_execute_command(): using become for this command")
cmd = self._connection.become.build_become_command(cmd, self._connection._shell)
if self._connection.allow_executable:
if executable is None:
executable = self._play_context.executable
# mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
# only applied for the default executable to avoid interfering with the raw action
cmd = self._connection._shell.append_command(cmd, 'sleep 0')
if executable:
cmd = executable + ' -c ' + shlex.quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
# Change directory to basedir of task for command execution when connection is local
if self._connection.transport == 'local':
self._connection.cwd = to_bytes(self._loader.get_basedir(), errors='surrogate_or_strict')
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
if isinstance(stdout, binary_type):
out = to_text(stdout, errors=encoding_errors)
elif not isinstance(stdout, text_type):
out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
else:
out = stdout
if isinstance(stderr, binary_type):
err = to_text(stderr, errors=encoding_errors)
elif not isinstance(stderr, text_type):
err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
else:
err = stderr
if rc is None:
rc = 0
# be sure to remove the BECOME-SUCCESS message now
out = self._strip_success_message(out)
display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())
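# Putting the pieces above together for a typical sudo-able run, the final
# command handed to exec_command() resembles (a sketch):
#
#     /bin/sh -c '<become-wrapped module command> && sleep 0'
#
# with the trailing 'sleep 0' being the SSH stdout-drop mitigation noted
# above.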
def _get_diff_data(self, destination, source, task_vars, content=None, source_file=True):
# Note: Since we do not diff the source and destination before we transform from bytes into
# text the diff between source and destination may not be accurate. To fix this, we'd need
# to move the diffing from the callback plugins into here.
#
# Example of data which would cause trouble is src_content == b'\xff' and dest_content ==
# b'\xfe'. Neither of those are valid utf-8 so both get turned into the replacement
# character: diff['before'] = u'�' ; diff['after'] = u'�' When the callback plugin later
# diffs before and after it shows an empty diff.
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(
module_name='ansible.legacy.file', module_args=dict(path=destination, _diff_peek=True),
task_vars=task_vars, persist_files=True)
if peek_result.get('failed', False):
display.warning(u"Failed to get diff between '%s' and '%s': %s" % (os.path.basename(source), destination, to_text(peek_result.get(u'msg', u''))))
return diff
if peek_result.get('rc', 0) == 0:
if peek_result.get('state') in (None, 'absent'):
diff['before'] = u''
elif peek_result.get('appears_binary'):
diff['dst_binary'] = 1
elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug(u"Slurping the file %s" % source)
dest_result = self._execute_module(
module_name='ansible.legacy.slurp', module_args=dict(path=destination),
task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == u'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % to_native(dest_result))
diff['before_header'] = destination
diff['before'] = to_text(dest_contents)
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
with open(source, 'rb') as src:
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, to_native(e)))
if b"\x00" in src_contents:
diff['src_binary'] = 1
else:
if content:
diff['after_header'] = destination
else:
diff['after_header'] = source
diff['after'] = to_text(src_contents)
else:
display.debug(u"source of file passed in")
diff['after_header'] = u'dynamically generated'
diff['after'] = source
if self._task.no_log:
if 'before' in diff:
diff["before"] = u""
if 'after' in diff:
diff["after"] = u" [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"
return diff
def _find_needle(self, dirname, needle):
"""
find a needle in a haystack of paths, optionally using 'dirname' as a subdir.
This will build the ordered list of paths to search and pass them to dwim
to get back the first existing file found.
"""
# dwim already deals with playbook basedirs
path_stack = self._task.get_search_path()
# if the file is missing, this will raise a 'file not found' exception
return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
| 69,202 | Python | .py | 1,197 | 45.028404 | 160 | 0.611573 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,387 | package.py | ansible_ansible/lib/ansible/plugins/action/package.py |
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
class ActionModule(ActionBase):
TRANSFERS_FILES = False
BUILTIN_PKG_MGR_MODULES = {manager['name'] for manager in PKG_MGRS}
def run(self, tmp=None, task_vars=None):
""" handler for package operations """
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
module = self._task.args.get('use', 'auto')
try:
if module == 'auto':
if self._task.delegate_to:
hosts_vars = task_vars['hostvars'][self._task.delegate_to]
tvars = combine_vars(self._task.vars, task_vars.get('delegated_vars', {}))
else:
hosts_vars = task_vars
tvars = task_vars
# use config
module = tvars.get('ansible_package_use', None)
if not module:
# no use, no config, get from facts
if hosts_vars.get('ansible_facts', {}).get('pkg_mgr', False):
facts = hosts_vars
pmgr = 'pkg_mgr'
else:
# we had no facts, so generate them
# very expensive step, we actually run fact gathering because we don't have facts for this host.
facts = self._execute_module(
module_name='ansible.legacy.setup',
module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'),
task_vars=task_vars,
)
if facts.get("failed", False):
raise AnsibleActionFail(
f"Failed to fetch ansible_pkg_mgr to determine the package action backend: {facts.get('msg')}",
result=facts,
)
pmgr = 'ansible_pkg_mgr'
try:
# actually get from facts
module = facts['ansible_facts'][pmgr]
except KeyError:
raise AnsibleActionFail('Could not detect a package manager. Try using the "use" option.')
if module and module != 'auto':
if not self._shared_loader_obj.module_loader.has_plugin(module):
raise AnsibleActionFail('Could not find a matching action for the "%s" package manager.' % module)
else:
# run the 'package' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
# get defaults for specific module
context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
new_module_args = get_action_args_with_defaults(
context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
action_groups=self._task._parent._play._action_groups
)
if module in self.BUILTIN_PKG_MGR_MODULES:
# prefix with ansible.legacy to eliminate external collisions while still allowing library/ override
module = 'ansible.legacy.' + module
display.vvvv("Running %s" % module)
result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
else:
raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')
except AnsibleAction as e:
result.update(e.result)
return result
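# In short, the backend resolution above tries, in order (a summary sketch,
# not the literal code):
#
#     1. the explicit 'use' task option
#     2. the 'ansible_package_use' variable for the (possibly delegated) host
#     3. the pkg_mgr fact, running the setup module if no facts are cached
#
# and then dispatches to that manager's module, e.g. 'ansible.legacy.apt'.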
| 5,014 | Python | .py | 91 | 40.241758 | 158 | 0.581191 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,388 | script.py | ansible_ansible/lib/ansible/plugins/action/script.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import re
import shlex
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail, AnsibleActionSkip
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
# On Windows platform, absolute paths begin with a (back)slash
# after chopping off a potential drive letter.
windows_absolute_path_detection = re.compile(r'^(?:[a-zA-Z]\:)?(\\|\/)')
def run(self, tmp=None, task_vars=None):
""" handler for file transfer operations """
if task_vars is None:
task_vars = dict()
validation_result, new_module_args = self.validate_argument_spec(
argument_spec={
'_raw_params': {},
'cmd': {'type': 'str'},
'creates': {'type': 'str'},
'removes': {'type': 'str'},
'chdir': {'type': 'str'},
'executable': {'type': 'str'},
},
required_one_of=[
['_raw_params', 'cmd']
]
)
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
try:
creates = new_module_args['creates']
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if self._remote_file_exists(creates):
raise AnsibleActionSkip("%s exists, matching creates option" % creates)
removes = new_module_args['removes']
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not self._remote_file_exists(removes):
raise AnsibleActionSkip("%s does not exist, matching removes option" % removes)
# The chdir must be absolute, because a relative path would rely on
# remote node behaviour & user config.
chdir = new_module_args['chdir']
if chdir:
# Powershell is the only Windows-path aware shell
if getattr(self._connection._shell, "_IS_WINDOWS", False) and \
not self.windows_absolute_path_detection.match(chdir):
raise AnsibleActionFail('chdir %s must be an absolute path for a Windows remote node' % chdir)
# Every other shell is unix-path-aware.
if not getattr(self._connection._shell, "_IS_WINDOWS", False) and not chdir.startswith('/'):
raise AnsibleActionFail('chdir %s must be an absolute path for a Unix-aware remote node' % chdir)
# Split out the script as the first item in raw_params using
# shlex.split() in order to support paths and files with spaces in the name.
# Any arguments passed to the script will be added back later.
raw_params = to_native(new_module_args.get('_raw_params', ''), errors='surrogate_or_strict')
parts = [to_text(s, errors='surrogate_or_strict') for s in shlex.split(raw_params.strip())]
source = parts[0]
# Support executable paths and files with spaces in the name.
executable = new_module_args['executable']
if executable:
executable = to_native(new_module_args['executable'], errors='surrogate_or_strict')
try:
source = self._loader.get_real_file(self._find_needle('files', source), decrypt=self._task.args.get('decrypt', True))
except AnsibleError as e:
raise AnsibleActionFail(to_native(e))
if self._task.check_mode:
# check mode is supported if 'creates' or 'removes' are provided
# the task has already been skipped if a change would not occur
if new_module_args['creates'] or new_module_args['removes']:
result['changed'] = True
raise _AnsibleActionDone(result=result)
# If the script doesn't return changed in the result, it defaults to True,
# but since the script may override 'changed', just skip instead of guessing.
else:
result['changed'] = False
raise AnsibleActionSkip('Check mode is not supported for this task.', result=result)
# now we execute script, always assume changed.
result['changed'] = True
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir,
os.path.basename(source))
# Convert raw_params to text for the purpose of replacing the script since
# parts and tmp_src are both unicode strings and raw_params will be different
# depending on Python version.
#
# Once everything is encoded consistently, replace the script path on the remote
# system with the remainder of the raw_params. This preserves quoting in parameters
# that would have been removed by shlex.split().
target_command = to_text(raw_params).strip().replace(parts[0], tmp_src)
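# Worked example (hypothetical task arguments): for
#
#     raw_params = '"my script.sh" --flag "an arg"'
#
# shlex.split() gives parts[0] == 'my script.sh', and after the transfer the
# replace() above produces roughly
#
#     target_command = '"<tmpdir>/my script.sh" --flag "an arg"'
#
# preserving the quoting of the remaining arguments.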
self._transfer_file(source, tmp_src)
# set file permissions, more permissive when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src), execute=True)
# add preparation steps to one ssh roundtrip executing the script
env_dict = dict()
env_string = self._compute_environment_string(env_dict)
if executable:
script_cmd = ' '.join([env_string, executable, target_command])
else:
script_cmd = ' '.join([env_string, target_command])
script_cmd = self._connection._shell.wrap_for_exec(script_cmd)
exec_data = None
# PowerShell runs the script in a special wrapper to enable things
# like become and environment args
if getattr(self._connection._shell, "_IS_WINDOWS", False):
# FUTURE: use a more public method to get the exec payload
pc = self._task
exec_data = ps_manifest._create_powershell_wrapper(
to_bytes(script_cmd), source, {}, env_dict, self._task.async_val,
pc.become, pc.become_method, pc.become_user,
self._play_context.become_pass, pc.become_flags, "script", task_vars, None
)
# build the necessary exec wrapper command
# FUTURE: this still doesn't let script work on Windows with non-pipelined connections or
# full manual exec of KEEP_REMOTE_FILES
script_cmd = self._connection._shell.build_module_command(env_string='', shebang='#!powershell', cmd='')
result.update(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data, sudoable=True, chdir=chdir))
if 'rc' in result and result['rc'] != 0:
raise AnsibleActionFail('non-zero return code')
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| 8,482 | Python | .py | 146 | 45.452055 | 133 | 0.615912 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,389 | set_stats.py | ansible_ansible/lib/ansible/plugins/action/set_stats.py |
# Copyright 2016 Ansible (RedHat, Inc)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import isidentifier
class ActionModule(ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('aggregate', 'data', 'per_host'))
_requires_connection = False
# TODO: document this in non-empty set_stats.py module
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
stats = {'data': {}, 'per_host': False, 'aggregate': True}
if self._task.args:
data = self._task.args.get('data', {})
if not isinstance(data, dict):
data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)
if not isinstance(data, dict):
result['failed'] = True
result['msg'] = "The 'data' option needs to be a dictionary/hash"
return result
# set boolean options, defaults are set above in stats init
for opt in ['per_host', 'aggregate']:
val = self._task.args.get(opt, None)
if val is not None:
if not isinstance(val, bool):
stats[opt] = boolean(self._templar.template(val), strict=False)
else:
stats[opt] = val
for (k, v) in data.items():
k = self._templar.template(k)
if not isidentifier(k):
result['failed'] = True
result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
"letters, numbers and underscores." % k)
return result
stats['data'][k] = self._templar.template(v)
result['changed'] = False
result['ansible_stats'] = stats
return result
| 2,819 | Python | .py | 58 | 38.724138 | 153 | 0.620489 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,390 | uri.py | ansible_ansible/lib/ansible/plugins/action/uri.py |
# -*- coding: utf-8 -*-
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
# (c) 2018, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import Mapping, MutableMapping
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
self._supports_async = True
self._supports_check_mode = False
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
body_format = self._task.args.get('body_format', 'raw')
body = self._task.args.get('body')
src = self._task.args.get('src', None)
remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)
try:
if remote_src:
# everything is remote, so we just execute the module
# without changing any of the module arguments
# call with ansible.legacy prefix to prevent collections collisions while allowing local override
raise _AnsibleActionDone(result=self._execute_module(module_name='ansible.legacy.uri',
task_vars=task_vars, wrap_async=self._task.async_val))
kwargs = {}
if src:
try:
src = self._find_needle('files', src)
except AnsibleError as e:
raise AnsibleActionFail(to_native(e))
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
kwargs['src'] = tmp_src
self._transfer_file(src, tmp_src)
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
elif body_format == 'form-multipart':
if not isinstance(body, Mapping):
raise AnsibleActionFail(
'body must be mapping, cannot be type %s' % body.__class__.__name__
)
for field, value in body.items():
if not isinstance(value, MutableMapping):
continue
content = value.get('content')
filename = value.get('filename')
                    # skip this field if no filename was given, or if inline 'content' is supplied instead
                    if not filename or content:
continue
try:
filename = self._find_needle('files', filename)
except AnsibleError as e:
raise AnsibleActionFail(to_native(e))
tmp_src = self._connection._shell.join_path(
self._connection._shell.tmpdir,
os.path.basename(filename)
)
value['filename'] = tmp_src
self._transfer_file(filename, tmp_src)
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
kwargs['body'] = body
new_module_args = self._task.args | kwargs
# call with ansible.legacy prefix to prevent collections collisions while allowing local override
result.update(self._execute_module('ansible.legacy.uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
except AnsibleAction as e:
result.update(e.result)
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
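# A standalone illustration (made-up values, Python 3.9+ dict union) of the
# 'self._task.args | kwargs' merge above: entries computed by the action,
# such as the rewritten remote 'src' path, override the original task args.
task_args = {'url': 'https://example.com', 'src': 'local.txt'}
kwargs = {'src': '/tmp/ansible-tmp/local.txt'}   # hypothetical remote path
new_module_args = task_args | kwargs             # right-hand side wins
assert new_module_args['src'] == '/tmp/ansible-tmp/local.txt'
assert new_module_args['url'] == 'https://example.com'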
| 3,981 | Python | .py | 74 | 39.135135 | 152 | 0.579583 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,391 | debug.py | ansible_ansible/lib/ansible/plugins/action/debug.py |
# Copyright 2012, Dag Wieers <dag@wieers.com>
# Copyright 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
""" Print statements during execution """
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('msg', 'var', 'verbosity'))
_requires_connection = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
validation_result, new_module_args = self.validate_argument_spec(
argument_spec={
'msg': {'type': 'raw', 'default': 'Hello world!'},
'var': {'type': 'raw'},
'verbosity': {'type': 'int', 'default': 0},
},
mutually_exclusive=(
('msg', 'var'),
),
)
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# get task verbosity
verbosity = new_module_args['verbosity']
if verbosity <= self._display.verbosity:
if new_module_args['var']:
try:
results = self._templar.template(new_module_args['var'], convert_bare=True, fail_on_undefined=True)
if results == new_module_args['var']:
# if results is not str/unicode type, raise an exception
if not isinstance(results, string_types):
raise AnsibleUndefinedVariable
# If var name is same as result, try to template it
results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
except AnsibleUndefinedVariable as e:
results = u"VARIABLE IS NOT DEFINED!"
if self._display.verbosity > 0:
results += u": %s" % to_text(e)
if isinstance(new_module_args['var'], (list, dict)):
# If var is a list or dict, use the type as key to display
result[to_text(type(new_module_args['var']))] = results
else:
result[new_module_args['var']] = results
else:
result['msg'] = new_module_args['msg']
# force flag to make debug output module always verbose
result['_ansible_verbose_always'] = True
else:
result['skipped_reason'] = "Verbosity threshold not met."
result['skipped'] = True
result['failed'] = False
return result
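# A standalone sketch of the verbosity gate above (illustrative function, not
# the plugin's API): a debug task only emits when its 'verbosity' argument is
# at or below the runtime -v level.
def should_emit(task_verbosity, display_verbosity):
    return task_verbosity <= display_verbosity

assert should_emit(0, 0)          # a plain 'debug:' task always shows
assert not should_emit(2, 1)      # 'verbosity: 2' is skipped under -v
assert should_emit(2, 3)          # ...but shows under -vvv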
| 3,472 | Python | .py | 72 | 37.708333 | 122 | 0.606141 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,392 | shell.py | ansible_ansible/lib/ansible/plugins/action/shell.py |
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.errors import AnsibleActionFail
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
# Shell module is implemented via command with a special arg
self._task.args['_uses_shell'] = True
# Shell shares the same module code as command. Fail if command
# specific options are set.
if "expand_argument_vars" in self._task.args:
raise AnsibleActionFail(f"Unsupported parameters for ({self._task.action}) module: expand_argument_vars")
command_action = self._shared_loader_obj.action_loader.get('ansible.legacy.command',
task=self._task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj)
result = command_action.run(task_vars=task_vars)
return result
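# A compressed standalone sketch (hypothetical dicts, not real task objects)
# of the delegation above: shell is command plus the private '_uses_shell'
# flag, and command-only options are rejected before delegating.
def prepare_shell_args(args):
    if 'expand_argument_vars' in args:
        raise ValueError('Unsupported parameters for (shell) module: expand_argument_vars')
    args['_uses_shell'] = True
    return args

assert prepare_shell_args({'cmd': 'echo $HOME'})['_uses_shell'] is True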
| 1,527 | Python | .py | 23 | 43.695652 | 117 | 0.534448 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,393 | service.py | ansible_ansible/lib/ansible/plugins/action/service.py |
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
UNUSED_PARAMS = {
'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
}
# HACK: list of unqualified service manager names that are/were built-in, we'll prefix these with `ansible.legacy` to
# avoid collisions with collections search
BUILTIN_SVC_MGR_MODULES = set(['openwrt_init', 'service', 'systemd', 'sysvinit'])
def run(self, tmp=None, task_vars=None):
""" handler for package operations """
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto').lower()
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['service_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template('{{ansible_facts.service_mgr}}')
except Exception:
pass # could not get it from template!
try:
if module == 'auto':
facts = self._execute_module(
module_name='ansible.legacy.setup',
module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
self._display.debug("Facts %s" % facts)
module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')
if not module or module == 'auto' or not self._shared_loader_obj.module_loader.has_plugin(module):
module = 'ansible.legacy.service'
if module != 'auto':
# run the 'service' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
if module in self.UNUSED_PARAMS:
for unused in self.UNUSED_PARAMS[module]:
if unused in new_module_args:
del new_module_args[unused]
self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))
# get defaults for specific module
context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
new_module_args = get_action_args_with_defaults(
context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
action_groups=self._task._parent._play._action_groups
)
# collection prefix known internal modules to avoid collisions from collections search, while still allowing library/ overrides
if module in self.BUILTIN_SVC_MGR_MODULES:
module = 'ansible.legacy.' + module
self._display.vvvv("Running %s" % module)
result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
else:
raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
except AnsibleAction as e:
result.update(e.result)
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
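# A compressed standalone sketch (hypothetical inputs) of the 'use: auto'
# resolution above: an explicit 'use' wins, then the gathered
# ansible_service_mgr fact, then the generic service module as a fallback.
def resolve_service_module(use, facts):
    if use != 'auto':
        return use
    return facts.get('ansible_service_mgr') or 'ansible.legacy.service'

assert resolve_service_module('systemd', {}) == 'systemd'
assert resolve_service_module('auto', {'ansible_service_mgr': 'sysvinit'}) == 'sysvinit'
assert resolve_service_module('auto', {}) == 'ansible.legacy.service'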
| 4,563 | Python | .py | 81 | 45 | 154 | 0.626765 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,394 | set_fact.py | ansible_ansible/lib/ansible/plugins/action/set_fact.py |
# Copyright 2013 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleActionFail
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import isidentifier
import ansible.constants as C
class ActionModule(ActionBase):
TRANSFERS_FILES = False
_requires_connection = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
facts = {}
cacheable = boolean(self._task.args.pop('cacheable', False))
if self._task.args:
for (k, v) in self._task.args.items():
k = self._templar.template(k)
if not isidentifier(k):
raise AnsibleActionFail("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
"and contain only letters, numbers and underscores." % k)
# NOTE: this should really use BOOLEANS from convert_bool, but only in the k=v case,
# right now it converts matching explicit YAML strings also when 'jinja2_native' is disabled.
if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
v = boolean(v, strict=False)
facts[k] = v
else:
raise AnsibleActionFail('No key/value pairs provided, at least one is required for this action to succeed')
if facts:
# just as _facts actions, we don't set changed=true as we are not modifying the actual host
result['ansible_facts'] = facts
result['_ansible_facts_cacheable'] = cacheable
else:
# this should not happen, but JIC we get here
raise AnsibleActionFail('Unable to create any variables with provided arguments')
return result
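# A standalone sketch of the string-to-boolean coercion above (illustrative
# helper): without Jinja2 native types, only the four YAML-style words are
# converted, everything else is kept verbatim.
def coerce(v):
    if isinstance(v, str) and v.lower() in ('true', 'false', 'yes', 'no'):
        return v.lower() in ('true', 'yes')
    return v

assert coerce('yes') is True
assert coerce('False') is False
assert coerce('0') == '0'    # not one of the four words, left as a string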
| 2,798 | Python | .py | 54 | 43.740741 | 143 | 0.676557 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,395 | validate_argument_spec.py | ansible_ansible/lib/ansible/plugins/action/validate_argument_spec.py |
# Copyright 2021 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
from ansible.utils.vars import combine_vars
class ActionModule(ActionBase):
""" Validate an arg spec"""
TRANSFERS_FILES = False
_requires_connection = False
def get_args_from_task_vars(self, argument_spec, task_vars):
"""
Get any arguments that may come from `task_vars`.
Expand templated variables so we can validate the actual values.
:param argument_spec: A dict of the argument spec.
:param task_vars: A dict of task variables.
:returns: A dict of values that can be validated against the arg spec.
"""
args = {}
for argument_name, argument_attrs in argument_spec.items():
if argument_name in task_vars:
args[argument_name] = task_vars[argument_name]
args = self._templar.template(args)
return args
def run(self, tmp=None, task_vars=None):
"""
Validate an argument specification against a provided set of data.
The `validate_argument_spec` module expects to receive the arguments:
- argument_spec: A dict whose keys are the valid argument names, and
whose values are dicts of the argument attributes (type, etc).
- provided_arguments: A dict whose keys are the argument names, and
whose values are the argument value.
:param tmp: Deprecated. Do not use.
:param task_vars: A dict of task variables.
:return: An action result dict, including a 'argument_errors' key with a
list of validation errors found.
"""
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# This action can be called from anywhere, so pass in some info about what it is
# validating args for so the error results make some sense
result['validate_args_context'] = self._task.args.get('validate_args_context', {})
if 'argument_spec' not in self._task.args:
raise AnsibleError('"argument_spec" arg is required in args: %s' % self._task.args)
# Get the task var called argument_spec. This will contain the arg spec
# data dict (for the proper entry point for a role).
argument_spec_data = self._task.args.get('argument_spec')
# the values that were passed in and will be checked against argument_spec
provided_arguments = self._task.args.get('provided_arguments', {})
if not isinstance(argument_spec_data, dict):
raise AnsibleError('Incorrect type for argument_spec, expected dict and got %s' % type(argument_spec_data))
if not isinstance(provided_arguments, dict):
raise AnsibleError('Incorrect type for provided_arguments, expected dict and got %s' % type(provided_arguments))
args_from_vars = self.get_args_from_task_vars(argument_spec_data, task_vars)
validator = ArgumentSpecValidator(argument_spec_data)
validation_result = validator.validate(combine_vars(args_from_vars, provided_arguments), validate_role_argument_spec=True)
if validation_result.error_messages:
result['failed'] = True
result['msg'] = 'Validation of arguments failed:\n%s' % '\n'.join(validation_result.error_messages)
result['argument_spec_data'] = argument_spec_data
result['argument_errors'] = validation_result.error_messages
return result
result['changed'] = False
result['msg'] = 'The arg spec validation passed'
return result
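# A short usage sketch of the public ArgumentSpecValidator API driven above;
# the spec and values here are made up, and running it assumes ansible-core
# is importable.
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

spec = {'name': {'type': 'str', 'required': True},
        'port': {'type': 'int', 'default': 8080}}
ok = ArgumentSpecValidator(spec).validate({'name': 'web', 'port': '80'})
assert not ok.error_messages                       # '80' is coerced to int
bad = ArgumentSpecValidator(spec).validate({})
assert bad.error_messages                          # required 'name' missing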
| 3,937 | Python | .py | 68 | 48.602941 | 130 | 0.674642 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,396 | gather_facts.py | ansible_ansible/lib/ansible/plugins/action/gather_facts.py |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import time
import typing as t
from ansible import constants as C
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
_supports_check_mode = True
def _get_module_args(self, fact_module: str, task_vars: dict[str, t.Any]) -> dict[str, t.Any]:
mod_args = self._task.args.copy()
# deal with 'setup specific arguments'
if fact_module not in C._ACTION_SETUP:
# TODO: remove in favor of controller side argspec detecting valid arguments
# network facts modules must support gather_subset
try:
name = self._connection.ansible_name.removeprefix('ansible.netcommon.')
except AttributeError:
name = self._connection._load_name.split('.')[-1]
if name not in ('network_cli', 'httpapi', 'netconf'):
subset = mod_args.pop('gather_subset', None)
if subset not in ('all', ['all'], None):
self._display.warning('Not passing subset(%s) to %s' % (subset, fact_module))
timeout = mod_args.pop('gather_timeout', None)
if timeout is not None:
self._display.warning('Not passing timeout(%s) to %s' % (timeout, fact_module))
fact_filter = mod_args.pop('filter', None)
if fact_filter is not None:
self._display.warning('Not passing filter(%s) to %s' % (fact_filter, fact_module))
# Strip out keys with ``None`` values, effectively mimicking ``omit`` behavior
# This ensures we don't pass a ``None`` value as an argument expecting a specific type
mod_args = dict((k, v) for k, v in mod_args.items() if v is not None)
# handle module defaults
resolved_fact_module = self._shared_loader_obj.module_loader.find_plugin_with_context(
fact_module, collection_list=self._task.collections
).resolved_fqcn
mod_args = get_action_args_with_defaults(
resolved_fact_module, mod_args, self._task.module_defaults, self._templar,
action_groups=self._task._parent._play._action_groups
)
return mod_args
def _combine_task_result(self, result: dict[str, t.Any], task_result: dict[str, t.Any]) -> dict[str, t.Any]:
filtered_res = {
'ansible_facts': task_result.get('ansible_facts', {}),
'warnings': task_result.get('warnings', []),
'deprecations': task_result.get('deprecations', []),
}
# on conflict the last plugin processed wins, but try to do deep merge and append to lists.
return merge_hash(result, filtered_res, list_merge='append_rp')
def run(self, tmp: t.Optional[str] = None, task_vars: t.Optional[dict[str, t.Any]] = None) -> dict[str, t.Any]:
result = super(ActionModule, self).run(tmp, task_vars)
result['ansible_facts'] = {}
# copy the value with list() so we don't mutate the config
modules = list(C.config.get_config_value('FACTS_MODULES', variables=task_vars))
parallel = task_vars.pop('ansible_facts_parallel', self._task.args.pop('parallel', None))
if 'smart' in modules:
connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
network_os = self._task.args.get('network_os', task_vars.get('ansible_network_os', task_vars.get('ansible_facts', {}).get('network_os')))
modules.extend([connection_map.get(network_os or self._connection.ansible_name, 'ansible.legacy.setup')])
modules.pop(modules.index('smart'))
failed = {}
skipped = {}
        if parallel is None:
            parallel = len(modules) > 1
        else:
            parallel = boolean(parallel)
timeout = self._task.args.get('gather_timeout', None)
async_val = self._task.async_val
if not parallel:
# serially execute each module
for fact_module in modules:
# just one module, no need for fancy async
mod_args = self._get_module_args(fact_module, task_vars)
# TODO: use gather_timeout to cut module execution if module itself does not support gather_timeout
res = self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=False)
if res.get('failed', False):
failed[fact_module] = res
elif res.get('skipped', False):
skipped[fact_module] = res
else:
result = self._combine_task_result(result, res)
self._remove_tmp_path(self._connection._shell.tmpdir)
else:
# do it async, aka parallel
jobs = {}
for fact_module in modules:
mod_args = self._get_module_args(fact_module, task_vars)
# if module does not handle timeout, use timeout to handle module, hijack async_val as this is what async_wrapper uses
# TODO: make this action complain about async/async settings, use parallel option instead .. or remove parallel in favor of async settings?
if timeout and 'gather_timeout' not in mod_args:
self._task.async_val = int(timeout)
elif async_val != 0:
self._task.async_val = async_val
else:
self._task.async_val = 0
self._display.vvvv("Running %s" % fact_module)
jobs[fact_module] = (self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=True))
while jobs:
for module in jobs:
poll_args = {'jid': jobs[module]['ansible_job_id'], '_async_dir': os.path.dirname(jobs[module]['results_file'])}
res = self._execute_module(module_name='ansible.legacy.async_status', module_args=poll_args, task_vars=task_vars, wrap_async=False)
if res.get('finished', 0) == 1:
if res.get('failed', False):
failed[module] = res
elif res.get('skipped', False):
skipped[module] = res
else:
result = self._combine_task_result(result, res)
del jobs[module]
break
else:
time.sleep(0.1)
else:
time.sleep(0.5)
# restore value for post processing
if self._task.async_val != async_val:
self._task.async_val = async_val
if skipped:
result['msg'] = "The following modules were skipped: %s\n" % (', '.join(skipped.keys()))
result['skipped_modules'] = skipped
if len(skipped) == len(modules):
result['skipped'] = True
if failed:
result['failed'] = True
result['msg'] = "The following modules failed to execute: %s\n" % (', '.join(failed.keys()))
result['failed_modules'] = failed
# tell executor facts were gathered
result['ansible_facts']['_ansible_facts_gathered'] = True
# hack to keep --verbose from showing all the setup module result
result['_ansible_verbose_override'] = True
return result
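# A standalone sketch of the 'parallel' default above (illustrative helper):
# when the option is unset, facts modules run in parallel only if more than
# one is configured.
def run_parallel(parallel_opt, modules):
    if parallel_opt is None:
        return len(modules) > 1
    return bool(parallel_opt)

assert run_parallel(None, ['setup']) is False
assert run_parallel(None, ['setup', 'ohai']) is True
assert run_parallel(False, ['setup', 'ohai']) is False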
| 7,863 | Python | .py | 138 | 43.797101 | 155 | 0.590661 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,397 | assemble.py | ansible_ansible/lib/ansible/plugins/action/assemble.py |
# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
# Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
from __future__ import annotations
import codecs
import os
import os.path
import re
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
""" assemble a file from a directory of fragments """
tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = u"%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh:
fragment_content = fragment_fh.read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b'\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
                    # compare a 1-byte slice: indexing bytes yields an int in Python 3
                    if delimiter[-1:] != b'\n':
tmp.write(b'\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b'\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if task_vars is None:
task_vars = dict()
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
follow = self._task.args.get('follow', False)
ignore_hidden = self._task.args.get('ignore_hidden', False)
decrypt = self._task.args.pop('decrypt', True)
try:
if src is None or dest is None:
raise AnsibleActionFail("src and dest are required")
if boolean(remote_src, strict=False):
# call assemble via ansible.legacy to allow library/ overrides of the module without collection search
result.update(self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars))
raise _AnsibleActionDone()
else:
try:
src = self._find_needle('files', src)
except AnsibleError as e:
raise AnsibleActionFail(to_native(e))
if not os.path.isdir(src):
raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
_re = None
if regexp is not None:
_re = re.compile(regexp)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest)
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow)
diff = {}
# setup args for running modules
new_module_args = self._task.args.copy()
# clean assemble specific options
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
if opt in new_module_args:
del new_module_args[opt]
new_module_args['dest'] = dest
if path_checksum != dest_stat['checksum']:
if self._task.diff:
diff = self._get_diff_data(dest, path, task_vars)
remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src')
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
new_module_args.update(dict(src=xfered,))
res = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
if diff:
res['diff'] = diff
result.update(res)
else:
result.update(self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
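# A minimal standalone sketch (in-memory fragments, made-up names) of the
# assembly rules in _assemble_from_fragments: fragments are concatenated in
# sorted order, with the delimiter written only *between* fragments.
fragments = {'10-base.conf': b'base\n', '20-extra.conf': b'extra'}
delimiter = b'# ---\n'
parts = []
for name in sorted(fragments):
    if parts:                      # delimit only from the second fragment on
        parts.append(delimiter)
    parts.append(fragments[name])
assert b''.join(parts) == b'base\n# ---\nextra'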
| 6,529 | Python | .py | 130 | 39.169231 | 136 | 0.616436 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,398 | async_status.py | ansible_ansible/lib/ansible/plugins/action/async_status.py |
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
def _get_async_dir(self):
# async directory based on the shell option
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
return self._remote_expand_user(async_dir)
def run(self, tmp=None, task_vars=None):
results = super(ActionModule, self).run(tmp, task_vars)
validation_result, new_module_args = self.validate_argument_spec(
argument_spec={
'jid': {'type': 'str', 'required': True},
'mode': {'type': 'str', 'choices': ['status', 'cleanup'], 'default': 'status'},
},
)
# initialize response
results['started'] = results['finished'] = 0
results['stdout'] = results['stderr'] = ''
results['stdout_lines'] = results['stderr_lines'] = []
jid = new_module_args["jid"]
mode = new_module_args["mode"]
results['ansible_job_id'] = jid
async_dir = self._get_async_dir()
log_path = self._connection._shell.join_path(async_dir, jid)
if mode == 'cleanup':
results['erased'] = log_path
else:
results['results_file'] = log_path
results['started'] = 1
new_module_args['_async_dir'] = async_dir
results = merge_hash(results, self._execute_module(module_name='ansible.legacy.async_status', task_vars=task_vars, module_args=new_module_args))
return results
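# A standalone sketch (illustrative paths) of the mode handling above:
# 'cleanup' reports the erased job file, 'status' reports where results live.
def describe(mode, async_dir, jid):
    log_path = '%s/%s' % (async_dir, jid)
    if mode == 'cleanup':
        return {'erased': log_path}
    return {'results_file': log_path, 'started': 1}

assert describe('cleanup', '~/.ansible_async', 'j123')['erased'].endswith('/j123')
assert describe('status', '~/.ansible_async', 'j123')['started'] == 1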
| 1,726 | Python | .py | 35 | 40.428571 | 152 | 0.620299 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
14,399 | reboot.py | ansible_ansible/lib/ansible/plugins/action/reboot.py |
# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import secrets
import time
from datetime import datetime, timedelta, timezone
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.validation import check_type_list, check_type_str
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
_VALID_ARGS = frozenset((
'boot_time_command',
'connect_timeout',
'msg',
'post_reboot_delay',
'pre_reboot_delay',
'reboot_command',
'reboot_timeout',
'search_paths',
'test_command',
))
DEFAULT_REBOOT_TIMEOUT = 600
DEFAULT_CONNECT_TIMEOUT = None
DEFAULT_PRE_REBOOT_DELAY = 0
DEFAULT_POST_REBOOT_DELAY = 0
DEFAULT_TEST_COMMAND = 'whoami'
DEFAULT_BOOT_TIME_COMMAND = 'cat /proc/sys/kernel/random/boot_id'
DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible'
DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
DEFAULT_SHUTDOWN_COMMAND_ARGS = '-r {delay_min} "{message}"'
DEFAULT_SUDOABLE = True
DEPRECATED_ARGS = {} # type: dict[str, str]
BOOT_TIME_COMMANDS = {
'freebsd': '/sbin/sysctl kern.boottime',
'openbsd': '/sbin/sysctl kern.boottime',
'macosx': 'who -b',
'solaris': 'who -b',
'sunos': 'who -b',
'vmkernel': 'grep booted /var/log/vmksummary.log | tail -n 1',
'aix': 'who -b',
}
SHUTDOWN_COMMANDS = {
'alpine': 'reboot',
'vmkernel': 'reboot',
}
SHUTDOWN_COMMAND_ARGS = {
'alpine': '',
'void': '-r +{delay_min} "{message}"',
'freebsd': '-r +{delay_sec}s "{message}"',
'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
'macosx': '-r +{delay_min} "{message}"',
'openbsd': '-r +{delay_min} "{message}"',
'solaris': '-y -g {delay_sec} -i 6 "{message}"',
'sunos': '-y -g {delay_sec} -i 6 "{message}"',
'vmkernel': '-d {delay_sec}',
'aix': '-Fr',
}
TEST_COMMANDS = {
'solaris': 'who',
'vmkernel': 'who',
}
def __init__(self, *args, **kwargs):
super(ActionModule, self).__init__(*args, **kwargs)
@property
def pre_reboot_delay(self):
return self._check_delay('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY)
@property
def post_reboot_delay(self):
return self._check_delay('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY)
def _check_delay(self, key, default):
"""Ensure that the value is positive or zero"""
value = int(self._task.args.get(key, self._task.args.get(key + '_sec', default)))
if value < 0:
value = 0
return value
def _get_value_from_facts(self, variable_name, distribution, default_value):
"""Get dist+version specific args first, then distribution, then family, lastly use default"""
attr = getattr(self, variable_name)
value = attr.get(
distribution['name'] + distribution['version'],
attr.get(
distribution['name'],
attr.get(
distribution['family'],
getattr(self, default_value))))
return value
def get_shutdown_command_args(self, distribution):
reboot_command = self._task.args.get('reboot_command')
if reboot_command is not None:
try:
reboot_command = check_type_str(reboot_command, allow_conversion=False)
except TypeError as e:
raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))
# No args were provided
try:
return reboot_command.split(' ', 1)[1]
except IndexError:
return ''
else:
args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
# Convert seconds to minutes. If less than 60, set it to 0.
delay_min = self.pre_reboot_delay // 60
reboot_message = self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE)
return args.format(delay_sec=self.pre_reboot_delay, delay_min=delay_min, message=reboot_message)
def get_distribution(self, task_vars):
# FIXME: only execute the module if we don't already have the facts we need
distribution = {}
display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
module_output = self._execute_module(
task_vars=task_vars,
module_name='ansible.legacy.setup',
module_args={'gather_subset': 'min'})
try:
if module_output.get('failed', False):
raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
to_native(module_output['module_stdout']).strip(),
to_native(module_output['module_stderr']).strip()))
distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
return distribution
except KeyError as ke:
raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
def get_shutdown_command(self, task_vars, distribution):
reboot_command = self._task.args.get('reboot_command')
if reboot_command is not None:
try:
reboot_command = check_type_str(reboot_command, allow_conversion=False)
except TypeError as e:
raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))
shutdown_bin = reboot_command.split(' ', 1)[0]
else:
shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
if shutdown_bin[0] == '/':
return shutdown_bin
else:
default_search_paths = ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
search_paths = self._task.args.get('search_paths', default_search_paths)
try:
# Convert bare strings to a list
search_paths = check_type_list(search_paths)
except TypeError:
err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
raise AnsibleError(err_msg.format(search_paths))
display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
action=self._task.action,
command=shutdown_bin,
paths=search_paths))
find_result = self._execute_module(
task_vars=task_vars,
# prevent collection search by calling with ansible.legacy (still allows library/ override of find)
module_name='ansible.legacy.find',
module_args={
'paths': search_paths,
'patterns': [shutdown_bin],
'file_type': 'any'
}
)
full_path = [x['path'] for x in find_result['files']]
if not full_path:
raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
return full_path[0]
def deprecated_args(self):
for arg, version in self.DEPRECATED_ARGS.items():
if self._task.args.get(arg) is not None:
display.warning("Since Ansible {version}, {arg} is no longer a valid option for {action}".format(
version=version,
arg=arg,
action=self._task.action))
def get_system_boot_time(self, distribution):
boot_time_command = self._get_value_from_facts('BOOT_TIME_COMMANDS', distribution, 'DEFAULT_BOOT_TIME_COMMAND')
if self._task.args.get('boot_time_command'):
boot_time_command = self._task.args.get('boot_time_command')
try:
check_type_str(boot_time_command, allow_conversion=False)
except TypeError as e:
raise AnsibleError("Invalid value given for 'boot_time_command': %s." % to_native(e))
display.debug("{action}: getting boot time with command: '{command}'".format(action=self._task.action, command=boot_time_command))
command_result = self._low_level_execute_command(boot_time_command, sudoable=self.DEFAULT_SUDOABLE)
if command_result['rc'] != 0:
stdout = command_result['stdout']
stderr = command_result['stderr']
raise AnsibleError("{action}: failed to get host boot time info, rc: {rc}, stdout: {out}, stderr: {err}".format(
action=self._task.action,
rc=command_result['rc'],
out=to_native(stdout),
err=to_native(stderr)))
display.debug("{action}: last boot time: {boot}".format(action=self._task.action, boot=command_result['stdout'].strip()))
return command_result['stdout'].strip()
def check_boot_time(self, distribution, previous_boot_time):
display.vvv("{action}: attempting to get system boot time".format(action=self._task.action))
connect_timeout = self._task.args.get('connect_timeout', self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
# override connection timeout from defaults to the custom value
if connect_timeout:
try:
display.debug("{action}: setting connect_timeout to {value}".format(action=self._task.action, value=connect_timeout))
self._connection.set_option("connection_timeout", connect_timeout)
except AnsibleError:
try:
self._connection.set_option("timeout", connect_timeout)
except (AnsibleError, AttributeError):
display.warning("Connection plugin does not allow the connection timeout to be overridden")
self._connection.reset()
# try and get boot time
        current_boot_time = self.get_system_boot_time(distribution)
# FreeBSD returns an empty string immediately before reboot so adding a length
# check to prevent prematurely assuming system has rebooted
if len(current_boot_time) == 0 or current_boot_time == previous_boot_time:
raise ValueError("boot time has not changed")
def run_test_command(self, distribution, **kwargs):
test_command = self._task.args.get('test_command', self._get_value_from_facts('TEST_COMMANDS', distribution, 'DEFAULT_TEST_COMMAND'))
display.vvv("{action}: attempting post-reboot test command".format(action=self._task.action))
display.debug("{action}: attempting post-reboot test command '{command}'".format(action=self._task.action, command=test_command))
try:
command_result = self._low_level_execute_command(test_command, sudoable=self.DEFAULT_SUDOABLE)
except Exception:
# may need to reset the connection in case another reboot occurred
# which has invalidated our connection
try:
self._connection.reset()
except AttributeError:
pass
raise
if command_result['rc'] != 0:
msg = 'Test command failed: {err} {out}'.format(
err=to_native(command_result['stderr']),
out=to_native(command_result['stdout']))
raise RuntimeError(msg)
display.vvv("{action}: system successfully rebooted".format(action=self._task.action))
def do_until_success_or_timeout(self, action, reboot_timeout, action_desc, distribution, action_kwargs=None):
max_end_time = datetime.now(timezone.utc) + timedelta(seconds=reboot_timeout)
if action_kwargs is None:
action_kwargs = {}
fail_count = 0
max_fail_sleep = 12
last_error_msg = ''
while datetime.now(timezone.utc) < max_end_time:
try:
action(distribution=distribution, **action_kwargs)
if action_desc:
display.debug('{action}: {desc} success'.format(action=self._task.action, desc=action_desc))
return
except Exception as e:
if isinstance(e, AnsibleConnectionFailure):
try:
self._connection.reset()
except AnsibleConnectionFailure:
pass
# Use exponential backoff with a max timeout, plus a little bit of randomness
random_int = secrets.randbelow(1000) / 1000
fail_sleep = 2 ** fail_count + random_int
if fail_sleep > max_fail_sleep:
fail_sleep = max_fail_sleep + random_int
if action_desc:
try:
error = to_text(e).splitlines()[-1]
except IndexError as e:
error = to_text(e)
last_error_msg = f"{self._task.action}: {action_desc} fail '{error}'"
msg = f"{last_error_msg}, retrying in {fail_sleep:.4f} seconds..."
display.debug(msg)
display.vvv(msg)
fail_count += 1
time.sleep(fail_sleep)
if last_error_msg:
msg = f"Last error message before the timeout exception - {last_error_msg}"
display.debug(msg)
display.vvv(msg)
raise TimedOutException('Timed out waiting for {desc} (timeout={timeout})'.format(desc=action_desc, timeout=reboot_timeout))
def perform_reboot(self, task_vars, distribution):
result = {}
reboot_result = {}
shutdown_command = self.get_shutdown_command(task_vars, distribution)
shutdown_command_args = self.get_shutdown_command_args(distribution)
reboot_command = '{0} {1}'.format(shutdown_command, shutdown_command_args)
try:
display.vvv("{action}: rebooting server...".format(action=self._task.action))
display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command))
reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
except AnsibleConnectionFailure as e:
# If the connection is closed too quickly due to the system being shutdown, carry on
display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
reboot_result['rc'] = 0
result['start'] = datetime.now(timezone.utc)
if reboot_result['rc'] != 0:
result['failed'] = True
result['rebooted'] = False
result['msg'] = "Reboot command failed. Error was: '{stdout}, {stderr}'".format(
stdout=to_native(reboot_result['stdout'].strip()),
stderr=to_native(reboot_result['stderr'].strip()))
return result
result['failed'] = False
return result
def validate_reboot(self, distribution, original_connection_timeout=None, action_kwargs=None):
display.vvv('{action}: validating reboot'.format(action=self._task.action))
result = {}
try:
# keep on checking system boot_time with short connection responses
reboot_timeout = int(self._task.args.get('reboot_timeout', self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT)))
self.do_until_success_or_timeout(
action=self.check_boot_time,
action_desc="last boot time check",
reboot_timeout=reboot_timeout,
distribution=distribution,
action_kwargs=action_kwargs)
# Get the connect_timeout set on the connection to compare to the original
try:
connect_timeout = self._connection.get_option('connection_timeout')
except KeyError:
try:
connect_timeout = self._connection.get_option('timeout')
except KeyError:
pass
else:
if original_connection_timeout != connect_timeout:
try:
display.debug("{action}: setting connect_timeout/timeout back to original value of {value}".format(action=self._task.action,
value=original_connection_timeout))
try:
self._connection.set_option("connection_timeout", original_connection_timeout)
except AnsibleError:
try:
self._connection.set_option("timeout", original_connection_timeout)
except AnsibleError:
raise
# reset the connection to clear the custom connection timeout
self._connection.reset()
except (AnsibleError, AttributeError) as e:
display.debug("{action}: failed to reset connection_timeout back to default: {error}".format(action=self._task.action,
error=to_text(e)))
# finally run test command to ensure everything is working
# FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
self.do_until_success_or_timeout(
action=self.run_test_command,
action_desc="post-reboot test command",
reboot_timeout=reboot_timeout,
distribution=distribution,
action_kwargs=action_kwargs)
result['rebooted'] = True
result['changed'] = True
except TimedOutException as toex:
result['failed'] = True
result['rebooted'] = True
result['msg'] = to_text(toex)
return result
return result
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
# If running with local connection, fail so we don't reboot ourselves
if self._connection.transport == 'local':
msg = 'Running {0} with local connection would reboot the control node.'.format(self._task.action)
return {'changed': False, 'elapsed': 0, 'rebooted': False, 'failed': True, 'msg': msg}
if self._task.check_mode:
return {'changed': True, 'elapsed': 0, 'rebooted': True}
if task_vars is None:
task_vars = {}
self.deprecated_args()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
distribution = self.get_distribution(task_vars)
# Get current boot time
try:
previous_boot_time = self.get_system_boot_time(distribution)
except Exception as e:
result['failed'] = True
result['reboot'] = False
result['msg'] = to_text(e)
return result
# Get the original connection_timeout option var so it can be reset after
original_connection_timeout = None
display.debug("{action}: saving original connect_timeout of {timeout}".format(action=self._task.action, timeout=original_connection_timeout))
try:
original_connection_timeout = self._connection.get_option('connection_timeout')
except KeyError:
try:
original_connection_timeout = self._connection.get_option('timeout')
except KeyError:
display.debug("{action}: connect_timeout connection option has not been set".format(action=self._task.action))
# Initiate reboot
reboot_result = self.perform_reboot(task_vars, distribution)
if reboot_result['failed']:
result = reboot_result
elapsed = datetime.now(timezone.utc) - reboot_result['start']
result['elapsed'] = elapsed.seconds
return result
if self.post_reboot_delay != 0:
display.debug("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
display.vvv("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
time.sleep(self.post_reboot_delay)
# Make sure reboot was successful
result = self.validate_reboot(distribution, original_connection_timeout, action_kwargs={'previous_boot_time': previous_boot_time})
elapsed = datetime.now(timezone.utc) - reboot_result['start']
result['elapsed'] = elapsed.seconds
return result
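# A standalone sketch of the retry backoff in do_until_success_or_timeout:
# exponential growth with sub-second jitter, capped at max_fail_sleep
# (12 seconds in the plugin). The helper name is illustrative.
import secrets

def backoff(fail_count, max_fail_sleep=12):
    jitter = secrets.randbelow(1000) / 1000
    sleep = 2 ** fail_count + jitter
    return sleep if sleep <= max_fail_sleep else max_fail_sleep + jitter

assert backoff(0) < 2       # first retry: about one second plus jitter
assert backoff(10) < 13     # capped near max_fail_sleep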
| 22,032 | Python | .py | 406 | 41.248768 | 158 | 0.597485 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |