Schema (17 columns; each entry lists the dtype and the observed min–max range or distinct-value count):

id                     int64           0 – 458k
file_name              stringlengths   4 – 119
file_path              stringlengths   14 – 227
content                stringlengths   24 – 9.96M
size                   int64           24 – 9.96M
language               stringclasses   1 value
extension              stringclasses   14 values
total_lines            int64           1 – 219k
avg_line_length        float64         2.52 – 4.63M
max_line_length        int64           5 – 9.91M
alphanum_fraction      float64         0 – 1
repo_name              stringlengths   7 – 101
repo_stars             int64           100 – 139k
repo_forks             int64           0 – 26.4k
repo_open_issues       int64           0 – 2.27k
repo_license           stringclasses   12 values
repo_extraction_date   stringclasses   433 values
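Rows conforming to this schema follow. For reference, a minimal sketch of loading and filtering such a dump with the Hugging Face datasets library; the dataset id used here is a placeholder, since this dump does not name its source dataset:

```python
from datasets import load_dataset

# "your-org/python-source-dump" is a placeholder id; the dump does not
# name the dataset it was exported from.
ds = load_dataset("your-org/python-source-dump", split="train", streaming=True)

# Filter on the schema's metadata columns, e.g. popular repos, small files.
subset = ds.filter(lambda row: row["repo_stars"] >= 1_000 and row["size"] < 50_000)

for row in subset.take(3):
    print(row["id"], row["file_path"], row["total_lines"])
```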
id: 14,200
file_name: wait_for_connection.py
file_path: ansible_ansible/lib/ansible/modules/wait_for_connection.py
content:
# -*- coding: utf-8 -*- # Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations DOCUMENTATION = r""" --- module: wait_for_connection short_description: Waits until remote system is reachable/usable description: - Waits for a total of O(timeout) seconds. - Retries the transport connection after a timeout of O(connect_timeout). - Tests the transport connection every O(sleep) seconds. - This module makes use of internal ansible transport (and configuration) and the M(ansible.builtin.ping)/M(ansible.windows.win_ping) modules to guarantee correct end-to-end functioning. - This module is also supported for Windows targets. version_added: '2.3' options: connect_timeout: description: - Maximum number of seconds to wait for a connection to happen before closing and retrying. type: int default: 5 delay: description: - Number of seconds to wait before starting to poll. type: int default: 0 sleep: description: - Number of seconds to sleep between checks. type: int default: 1 timeout: description: - Maximum number of seconds to wait for. type: int default: 600 extends_documentation_fragment: - action_common_attributes - action_common_attributes.flow attributes: action: support: full async: support: none bypass_host_loop: support: none check_mode: support: none diff_mode: support: none platform: details: As long as there is a connection plugin platforms: all seealso: - module: ansible.builtin.wait_for - module: ansible.windows.win_wait_for - module: community.windows.win_wait_for_process author: - Dag Wieers (@dagwieers) """ EXAMPLES = r""" - name: Wait 600 seconds for target connection to become reachable/usable ansible.builtin.wait_for_connection: - name: Wait 300 seconds, but only start checking after 60 seconds ansible.builtin.wait_for_connection: delay: 60 timeout: 300 # Wake desktops, wait for them to become ready and continue playbook - hosts: all gather_facts: no tasks: - name: Send magic Wake-On-Lan packet to turn on individual systems community.general.wakeonlan: mac: '{{ mac }}' broadcast: 192.168.0.255 delegate_to: localhost - name: Wait for system to become reachable ansible.builtin.wait_for_connection: - name: Gather facts for first time ansible.builtin.setup: # Build a new VM, wait for it to become ready and continue playbook - hosts: all gather_facts: no tasks: - name: Clone new VM, if missing community.vmware.vmware_guest: hostname: '{{ vcenter_ipaddress }}' name: '{{ inventory_hostname_short }}' template: Windows 2012R2 customization: hostname: '{{ vm_shortname }}' runonce: - cmd.exe /c winrm.cmd quickconfig -quiet -force delegate_to: localhost - name: Wait for system to become reachable over WinRM ansible.builtin.wait_for_connection: timeout: 900 - name: Gather facts for first time ansible.builtin.setup: """ RETURN = r""" elapsed: description: The number of seconds that elapsed waiting for the connection to appear. returned: always type: float sample: 23.1 """
size: 3,367
language: Python
extension: .py
total_lines: 108
avg_line_length: 27.277778
max_line_length: 133
alphanum_fraction: 0.721195
repo_name: ansible/ansible
repo_stars: 62,258
repo_forks: 23,791
repo_open_issues: 861
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
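The derived columns just listed (size, total_lines, avg_line_length, max_line_length, alphanum_fraction) are computable from content. A minimal sketch assuming the most straightforward definitions; the dump does not document the exact formulas, so results may not reproduce the stored values bit-for-bit:

```python
def file_stats(content: str) -> dict:
    """Recompute per-file statistic columns under assumed definitions."""
    lines = content.splitlines()
    return {
        "size": len(content.encode("utf-8")),  # assumed: UTF-8 byte count
        "total_lines": len(lines),
        "avg_line_length": sum(map(len, lines)) / len(lines) if lines else 0.0,
        "max_line_length": max(map(len, lines), default=0),
        # assumed: share of characters that are letters or digits
        "alphanum_fraction": (
            sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
        ),
    }

print(file_stats("x = 1\nprint(x)\n"))
```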
id: 14,201
file_name: systemd.py
file_path: ansible_ansible/lib/ansible/modules/systemd.py
content:
# -*- coding: utf-8 -*- # Copyright: (c) 2016, Brian Coca <bcoca@ansible.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations DOCUMENTATION = """ module: systemd_service author: - Ansible Core Team version_added: "2.2" short_description: Manage systemd units description: - Controls systemd units (services, timers, and so on) on remote hosts. - M(ansible.builtin.systemd) is renamed to M(ansible.builtin.systemd_service) to better reflect the scope of the module. M(ansible.builtin.systemd) is kept as an alias for backward compatibility. options: name: description: - Name of the unit. This parameter takes the name of exactly one unit to work with. - When no extension is given, it is implied to a C(.service) as systemd. - When using in a chroot environment you always need to specify the name of the unit with the extension. For example, C(crond.service). type: str aliases: [ service, unit ] state: description: - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always bounce the unit. V(reloaded) will always reload and if the service is not running at the moment of the reload, it is started. - If set, requires O(name). type: str choices: [ reloaded, restarted, started, stopped ] enabled: description: - Whether the unit should start on boot. At least one of O(state) and O(enabled) are required. - If set, requires O(name). type: bool force: description: - Whether to override existing symlinks. type: bool version_added: 2.6 masked: description: - Whether the unit should be masked or not. A masked unit is impossible to start. - If set, requires O(name). type: bool daemon_reload: description: - Run C(daemon-reload) before doing any other operations, to make sure systemd has read any changes. - When set to V(true), runs C(daemon-reload) even if the module does not start or stop anything. type: bool default: no aliases: [ daemon-reload ] daemon_reexec: description: - Run daemon_reexec command before doing any other operations, the systemd manager will serialize the manager state. type: bool default: no aliases: [ daemon-reexec ] version_added: "2.8" scope: description: - Run C(systemctl) within a given service manager scope, either as the default system scope V(system), the current user's scope V(user), or the scope of all users V(global). - "For systemd to work with V(user), the executing user must have its own instance of dbus started and accessible (systemd requirement)." - "The user dbus process is normally started during normal login, but not during the run of Ansible tasks. Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error." - The user must have access, normally given via setting the C(XDG_RUNTIME_DIR) variable, see the example below. type: str choices: [ system, user, global ] default: system version_added: "2.7" no_block: description: - Do not synchronously wait for the requested operation to finish. Enqueued job will continue without Ansible blocking on its completion. type: bool default: no version_added: "2.3" extends_documentation_fragment: action_common_attributes attributes: check_mode: support: full diff_mode: support: none platform: platforms: posix notes: - O(state), O(enabled), O(masked) requires O(name). - Before 2.4 you always required O(name). - Globs are not supported in name, in other words, C(postgres*.service). - The service names might vary by specific OS/distribution. 
- The order of execution when having multiple properties is to first enable/disable, then mask/unmask and then deal with the service state. It has been reported that C(systemctl) can behave differently depending on the order of operations if you do the same manually. requirements: - A system managed by systemd. """ EXAMPLES = """ - name: Make sure a service unit is running ansible.builtin.systemd_service: state: started name: httpd - name: Stop service cron on debian, if running ansible.builtin.systemd_service: name: cron state: stopped - name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes ansible.builtin.systemd_service: state: restarted daemon_reload: true name: crond - name: Reload service httpd, in all cases ansible.builtin.systemd_service: name: httpd.service state: reloaded - name: Enable service httpd and ensure it is not masked ansible.builtin.systemd_service: name: httpd enabled: true masked: no - name: Enable a timer unit for dnf-automatic ansible.builtin.systemd_service: name: dnf-automatic.timer state: started enabled: true - name: Just force systemd to reread configs (2.4 and above) ansible.builtin.systemd_service: daemon_reload: true - name: Just force systemd to re-execute itself (2.8 and above) ansible.builtin.systemd_service: daemon_reexec: true - name: Run a user service when XDG_RUNTIME_DIR is not set on remote login ansible.builtin.systemd_service: name: myservice state: started scope: user environment: XDG_RUNTIME_DIR: "/run/user/{{ myuid }}" """ RETURN = """ status: description: A dictionary with the key=value pairs returned from C(systemctl show). returned: success type: dict sample: { "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT", "ActiveEnterTimestampMonotonic": "8135942", "ActiveExitTimestampMonotonic": "0", "ActiveState": "active", "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice", "AllowIsolate": "no", "Before": "shutdown.target multi-user.target", "BlockIOAccounting": "no", "BlockIOWeight": "1000", "CPUAccounting": "no", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "1024", "CanIsolate": "no", "CanReload": "yes", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "18446744073709551615", "ConditionResult": "yes", "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT", "ConditionTimestampMonotonic": "7902742", "Conflicts": "shutdown.target", "ControlGroup": "/system.slice/crond.service", "ControlPID": "0", "DefaultDependencies": "yes", "Delegate": "no", "Description": "Command Scheduler", "DevicePolicy": "auto", "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainPID": "595", "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT", "ExecMainStartTimestampMonotonic": "8134990", "ExecMainStatus": "0", "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "FragmentPath": "/usr/lib/systemd/system/crond.service", "GuessMainPID": "yes", "IOScheduling": "0", "Id": "crond.service", "IgnoreOnIsolate": "no", "IgnoreOnSnapshot": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestamp": "Sun 
2016-05-15 18:28:49 EDT", "InactiveExitTimestampMonotonic": "8135942", "JobTimeoutUSec": "0", "KillMode": "process", "KillSignal": "15", "LimitAS": "18446744073709551615", "LimitCORE": "18446744073709551615", "LimitCPU": "18446744073709551615", "LimitDATA": "18446744073709551615", "LimitFSIZE": "18446744073709551615", "LimitLOCKS": "18446744073709551615", "LimitMEMLOCK": "65536", "LimitMSGQUEUE": "819200", "LimitNICE": "0", "LimitNOFILE": "4096", "LimitNPROC": "3902", "LimitRSS": "18446744073709551615", "LimitRTPRIO": "0", "LimitRTTIME": "18446744073709551615", "LimitSIGPENDING": "3902", "LimitSTACK": "18446744073709551615", "LoadState": "loaded", "MainPID": "595", "MemoryAccounting": "no", "MemoryLimit": "18446744073709551615", "MountFlags": "0", "Names": "crond.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "none", "OOMScoreAdjust": "0", "OnFailureIsolate": "no", "PermissionsStartOnly": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "RemainAfterExit": "no", "Requires": "basic.target", "Restart": "no", "RestartUSec": "100ms", "Result": "success", "RootDirectoryStartOnly": "no", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "system.slice", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitInterval": "10000000", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "running", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TimeoutStartUSec": "1min 30s", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "simple", "UMask": "0022", "UnitFileState": "enabled", "WantedBy": "multi-user.target", "Wants": "system.slice", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "0", } """ # NOQA import os from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.facts.system.chroot import is_chroot from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing from ansible.module_utils.common.text.converters import to_native def is_running_service(service_status): return service_status['ActiveState'] in set(['active', 'activating']) def is_deactivating_service(service_status): return service_status['ActiveState'] in set(['deactivating']) def request_was_ignored(out): return '=' not in out and ('ignoring request' in out or 'ignoring command' in out) def parse_systemctl_show(lines): # The output of 'systemctl show' can contain values that span multiple lines. At first glance it # appears that such values are always surrounded by {}, so the previous version of this code # assumed that any value starting with { was a multi-line value; it would then consume lines # until it saw a line that ended with }. However, it is possible to have a single-line value # that starts with { but does not end with } (this could happen in the value for Description=, # for example), and the previous version of this code would then consume all remaining lines as # part of that value. Cryptically, this would lead to Ansible reporting that the service file # couldn't be found. # # To avoid this issue, the following code only accepts multi-line values for keys whose names # start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to # span multiple lines. 
parsed = {} multival = [] k = None for line in lines: if k is None: if '=' in line: k, v = line.split('=', 1) if k.startswith('Exec') and v.lstrip().startswith('{'): if not v.rstrip().endswith('}'): multival.append(v) continue parsed[k] = v.strip() k = None else: multival.append(line) if line.rstrip().endswith('}'): parsed[k] = '\n'.join(multival).strip() multival = [] k = None return parsed # =========================================== # Main control flow def main(): # initialize module = AnsibleModule( argument_spec=dict( name=dict(type='str', aliases=['service', 'unit']), state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']), enabled=dict(type='bool'), force=dict(type='bool'), masked=dict(type='bool'), daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']), daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']), scope=dict(type='str', default='system', choices=['system', 'user', 'global']), no_block=dict(type='bool', default=False), ), supports_check_mode=True, required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']], required_by=dict( state=('name', ), enabled=('name', ), masked=('name', ), ), ) unit = module.params['name'] if unit is not None: for globpattern in (r"*", r"?", r"["): if globpattern in unit: module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit)) systemctl = module.get_bin_path('systemctl', True) if os.getenv('XDG_RUNTIME_DIR') is None: os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid() # Set CLI options depending on params # if scope is 'system' or None, we can ignore as there is no extra switch. # The other choices match the corresponding switch if module.params['scope'] != 'system': systemctl += " --%s" % module.params['scope'] if module.params['no_block']: systemctl += " --no-block" if module.params['force']: systemctl += " --force" rc = 0 out = err = '' result = dict( name=unit, changed=False, status=dict(), ) # Run daemon-reload first, if requested if module.params['daemon_reload'] and not module.check_mode: (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl)) if rc != 0: if is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1': module.warn('daemon-reload failed, but target is a chroot or systemd is offline. Continuing. Error was: %d / %s' % (rc, err)) else: module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err)) # Run daemon-reexec if module.params['daemon_reexec'] and not module.check_mode: (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl)) if rc != 0: if is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1': module.warn('daemon-reexec failed, but target is a chroot or systemd is offline. Continuing. 
Error was: %d / %s' % (rc, err)) else: module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err)) if unit: found = False is_initd = sysv_exists(unit) is_systemd = False # check service data, cannot error out on rc as it changes across versions, assume not found (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit)) if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)): # load return of systemctl show into dictionary for easy access and return if out: result['status'] = parse_systemctl_show(to_native(out).split('\n')) is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found' is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked' # Check for loading error if is_systemd and not is_masked and 'LoadError' in result['status']: module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError'])) # Workaround for https://github.com/ansible/ansible/issues/71528 elif err and rc == 1 and 'Failed to parse bus message' in err: result['status'] = parse_systemctl_show(to_native(out).split('\n')) unit_base, sep, suffix = unit.partition('@') unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep) (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search)) is_systemd = unit_search in out (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit)) result['status']['ActiveState'] = out.rstrip('\n') else: # list taken from man systemctl(1) for systemd 244 valid_enabled_states = [ "enabled", "enabled-runtime", "linked", "linked-runtime", "masked", "masked-runtime", "static", "indirect", "disabled", "generated", "transient"] (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit)) if out.strip() in valid_enabled_states: is_systemd = True else: # fallback list-unit-files as show does not work on some systems (chroot) # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit)) if rc == 0: is_systemd = True else: # Check for systemctl command module.run_command(systemctl, check_rc=True) # Does service exist? 
found = is_systemd or is_initd if is_initd and not is_systemd: module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit) # mask/unmask the service, if requested, can operate on services before they are installed if module.params['masked'] is not None: # state is not masked unless systemd affirms otherwise (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit)) masked = out.strip() == "masked" if masked != module.params['masked']: result['changed'] = True if module.params['masked']: action = 'mask' else: action = 'unmask' if not module.check_mode: (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) if rc != 0: # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't fail_if_missing(module, found, unit, msg='host') # here if service was not missing, but failed for other reasons module.fail_json(msg=f"Failed to {action} the service ({unit}): {err.strip()}") # Enable/disable service startup at boot if requested if module.params['enabled'] is not None: if module.params['enabled']: action = 'enable' else: action = 'disable' fail_if_missing(module, found, unit, msg='host') # do we need to enable the service? enabled = False (rc, out, err) = module.run_command("%s is-enabled '%s' -l" % (systemctl, unit)) # check systemctl result or if it is a init script if rc == 0: # https://www.freedesktop.org/software/systemd/man/systemctl.html#is-enabled%20UNIT%E2%80%A6 if out.rstrip() in ( "enabled-runtime", # transiently enabled but we're trying to set a permanent enabled "indirect", # We've been asked to enable this unit so do so despite possible reasons # that systemctl may have for thinking it's enabled already. "alias"): # Let systemd handle the alias as we can't be sure what's needed. enabled = False else: enabled = True elif rc == 1: # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries if module.params['scope'] == 'system' and \ is_initd and \ not out.strip().endswith('disabled') and \ sysv_is_enabled(unit): enabled = True # default to current state result['enabled'] = enabled # Change enable/disable if needed if enabled != module.params['enabled']: result['changed'] = True if not module.check_mode: (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) if rc != 0: module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err)) result['enabled'] = not enabled # set service state if requested if module.params['state'] is not None: fail_if_missing(module, found, unit, msg="host") # default to desired state result['state'] = module.params['state'] # What is current service state? 
if 'ActiveState' in result['status']: action = None if module.params['state'] == 'started': if not is_running_service(result['status']): action = 'start' elif module.params['state'] == 'stopped': if is_running_service(result['status']) or is_deactivating_service(result['status']): action = 'stop' else: if not is_running_service(result['status']): action = 'start' else: action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded result['state'] = 'started' if action: result['changed'] = True if not module.check_mode: (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) if rc != 0: module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err)) # check for chroot elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1': module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.") else: # this should not happen? module.fail_json(msg="Service is in unknown state", status=result['status']) module.exit_json(**result) if __name__ == '__main__': main()
size: 24,990
language: Python
extension: .py
total_lines: 520
avg_line_length: 36.792308
max_line_length: 182
alphanum_fraction: 0.581814
repo_name: ansible/ansible
repo_stars: 62,258
repo_forks: 23,791
repo_open_issues: 861
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
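The block comment above parse_systemctl_show in this file documents a subtle parsing pitfall: only keys starting with Exec may legitimately span multiple lines, so values of other keys that begin with "{" must not trigger multi-line consumption. A condensed, self-contained rendering of that parser, exercised on invented sample lines:

```python
def parse_systemctl_show(lines):
    """Parse 'systemctl show' output; only Exec* values may span lines."""
    parsed = {}
    multival = []
    k = None
    for line in lines:
        if k is None:
            if '=' in line:
                k, v = line.split('=', 1)
                if k.startswith('Exec') and v.lstrip().startswith('{') and not v.rstrip().endswith('}'):
                    multival.append(v)  # open a multi-line Exec value
                    continue
                parsed[k] = v.strip()
                k = None
        else:
            multival.append(line)
            if line.rstrip().endswith('}'):  # close the multi-line value
                parsed[k] = '\n'.join(multival).strip()
                multival = []
                k = None
    return parsed

sample = [
    "Id=crond.service",
    "Description={ unbalanced brace in a one-line value",  # not Exec*, kept single-line
    "ExecReload={ path=/bin/kill ;",
    "argv[]=/bin/kill -HUP $MAINPID }",
]
parsed = parse_systemctl_show(sample)
assert parsed["Description"].startswith("{ unbalanced")
assert parsed["ExecReload"].endswith("$MAINPID }")
```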
id: 14,202
file_name: group.py
file_path: ansible_ansible/lib/ansible/modules/group.py
content:
# -*- coding: utf-8 -*- # Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations DOCUMENTATION = """ --- module: group version_added: "0.0.2" short_description: Add or remove groups requirements: - groupadd - groupdel - groupmod description: - Manage presence of groups on a host. - For Windows targets, use the M(ansible.windows.win_group) module instead. options: name: description: - Name of the group to manage. type: str required: true gid: description: - Optional I(GID) to set for the group. type: int state: description: - Whether the group should be present or not on the remote host. type: str choices: [ absent, present ] default: present force: description: - Whether to delete a group even if it is the primary group of a user. - Only applicable on platforms which implement a C(--force) flag on the group deletion command. type: bool default: false version_added: "2.15" system: description: - If V(yes), indicates that the group created is a system group. type: bool default: no local: description: - Forces the use of "local" command alternatives on platforms that implement it. - This is useful in environments that use centralized authentication when you want to manipulate the local groups. (for example, it uses C(lgroupadd) instead of C(groupadd)). - This requires that these commands exist on the targeted host, otherwise it will be a fatal error. type: bool default: no version_added: "2.6" non_unique: description: - This option allows to change the group ID to a non-unique value. Requires O(gid). - Not supported on macOS or BusyBox distributions. type: bool default: no version_added: "2.8" gid_min: description: - Sets the GID_MIN value for group creation. - Overwrites /etc/login.defs default value. - Currently supported on Linux. Does nothing when used with other platforms. - Requires O(local) is omitted or V(False). type: int version_added: "2.18" gid_max: description: - Sets the GID_MAX value for group creation. - Overwrites /etc/login.defs default value. - Currently supported on Linux. Does nothing when used with other platforms. - Requires O(local) is omitted or V(False). type: int version_added: "2.18" extends_documentation_fragment: action_common_attributes attributes: check_mode: support: full diff_mode: support: none platform: platforms: posix seealso: - module: ansible.builtin.user - module: ansible.windows.win_group author: - Stephen Fromm (@sfromm) """ EXAMPLES = """ - name: Ensure group "somegroup" exists ansible.builtin.group: name: somegroup state: present - name: Ensure group "docker" exists with correct gid ansible.builtin.group: name: docker state: present gid: 1750 """ RETURN = r""" gid: description: Group ID of the group. returned: When O(state) is C(present) type: int sample: 1001 name: description: Group name. returned: always type: str sample: users state: description: Whether the group is present or not. returned: always type: str sample: 'absent' system: description: Whether the group is a system group or not. returned: When O(state) is C(present) type: bool sample: False """ import grp import os from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.sys_info import get_platform_subclass class Group(object): """ This is a generic Group manipulation class that is subclassed based on platform. 
A subclass may wish to override the following action methods:- - group_del() - group_add() - group_mod() All subclasses MUST define platform and distribution (which may be None). """ platform = 'Generic' distribution = None # type: str | None GROUPFILE = '/etc/group' def __new__(cls, *args, **kwargs): new_cls = get_platform_subclass(Group) return super(cls, new_cls).__new__(new_cls) def __init__(self, module): self.module = module self.state = module.params['state'] self.name = module.params['name'] self.force = module.params['force'] self.gid = module.params['gid'] self.system = module.params['system'] self.local = module.params['local'] self.non_unique = module.params['non_unique'] self.gid_min = module.params['gid_min'] self.gid_max = module.params['gid_max'] if self.local: if self.gid_min is not None: module.fail_json(msg="'gid_min' can not be used with 'local'") if self.gid_max is not None: module.fail_json(msg="'gid_max' can not be used with 'local'") def execute_command(self, cmd): return self.module.run_command(cmd) def group_del(self): if self.local: command_name = 'lgroupdel' else: command_name = 'groupdel' cmd = [self.module.get_bin_path(command_name, True), self.name] return self.execute_command(cmd) def _local_check_gid_exists(self): if self.gid: for gr in grp.getgrall(): if self.gid == gr.gr_gid and self.name != gr.gr_name: self.module.fail_json(msg="GID '{0}' already exists with group '{1}'".format(self.gid, gr.gr_name)) def group_add(self, **kwargs): if self.local: command_name = 'lgroupadd' self._local_check_gid_exists() else: command_name = 'groupadd' cmd = [self.module.get_bin_path(command_name, True)] for key in kwargs: if key == 'gid' and kwargs[key] is not None: cmd.append('-g') cmd.append(str(kwargs[key])) if self.non_unique: cmd.append('-o') elif key == 'system' and kwargs[key] is True: cmd.append('-r') if self.gid_min is not None: cmd.append('-K') cmd.append('GID_MIN=' + str(self.gid_min)) if self.gid_max is not None: cmd.append('-K') cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): if self.local: command_name = 'lgroupmod' self._local_check_gid_exists() else: command_name = 'groupmod' cmd = [self.module.get_bin_path(command_name, True)] info = self.group_info() for key in kwargs: if key == 'gid': if kwargs[key] is not None and info[2] != int(kwargs[key]): cmd.append('-g') cmd.append(str(kwargs[key])) if self.non_unique: cmd.append('-o') if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) def group_exists(self): # The grp module does not distinguish between local and directory accounts. # It's output cannot be used to determine whether or not a group exists locally. # It returns True if the group exists locally or in the directory, so instead # look in the local GROUP file for an existing account. 
if self.local: if not os.path.exists(self.GROUPFILE): self.module.fail_json(msg="'local: true' specified but unable to find local group file {0} to parse.".format(self.GROUPFILE)) exists = False name_test = '{0}:'.format(self.name) with open(self.GROUPFILE, 'rb') as f: reversed_lines = f.readlines()[::-1] for line in reversed_lines: if line.startswith(to_bytes(name_test)): exists = True break return exists else: try: if grp.getgrnam(self.name): return True except KeyError: return False def group_info(self): if not self.group_exists(): return False try: info = list(grp.getgrnam(self.name)) except KeyError: return False return info # =========================================== class Linux(Group): """ This is a Linux Group manipulation class. This is to apply the '-f' parameter to the groupdel command This overrides the following methods from the generic class:- - group_del() """ platform = 'Linux' distribution = None def group_del(self): if self.local: command_name = 'lgroupdel' else: command_name = 'groupdel' cmd = [self.module.get_bin_path(command_name, True)] if self.force: cmd.append('-f') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class SunOS(Group): """ This is a SunOS Group manipulation class. Solaris doesn't have the 'system' group concept. This overrides the following methods from the generic class:- - group_add() """ platform = 'SunOS' distribution = None GROUPFILE = '/etc/group' def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] for key in kwargs: if key == 'gid' and kwargs[key] is not None: cmd.append('-g') cmd.append(str(kwargs[key])) if self.non_unique: cmd.append('-o') if self.gid_min is not None: cmd.append('-K') cmd.append('GID_MIN=' + str(self.gid_min)) if self.gid_max is not None: cmd.append('-K') cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) # =========================================== class AIX(Group): """ This is a AIX Group manipulation class. This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'AIX' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('rmgroup', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('mkgroup', True)] for key in kwargs: if key == 'gid' and kwargs[key] is not None: cmd.append('id=' + str(kwargs[key])) elif key == 'system' and kwargs[key] is True: cmd.append('-a') if self.gid_min is not None: cmd.append('-K') cmd.append('GID_MIN=' + str(self.gid_min)) if self.gid_max is not None: cmd.append('-K') cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('chgroup', True)] info = self.group_info() for key in kwargs: if key == 'gid': if kwargs[key] is not None and info[2] != int(kwargs[key]): cmd.append('id=' + str(kwargs[key])) if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class FreeBsdGroup(Group): """ This is a FreeBSD Group manipulation class. 
This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'FreeBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name] if self.gid is not None: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') if self.gid_min is not None: cmd.append('-K') cmd.append('GID_MIN=' + str(self.gid_min)) if self.gid_max is not None: cmd.append('-K') cmd.append('GID_MAX=' + str(self.gid_max)) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name] info = self.group_info() cmd_len = len(cmd) if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') # modify the group if cmd will do anything if cmd_len != len(cmd): if self.module.check_mode: return (0, '', '') return self.execute_command(cmd) return (None, '', '') class DragonFlyBsdGroup(FreeBsdGroup): """ This is a DragonFlyBSD Group manipulation class. It inherits all behaviors from FreeBsdGroup class. """ platform = 'DragonFly' # =========================================== class DarwinGroup(Group): """ This is a Mac macOS Darwin Group manipulation class. This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() group manipulation are done using dseditgroup(1). """ platform = 'Darwin' distribution = None def group_add(self, **kwargs): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'create'] if self.gid is not None: cmd += ['-i', str(self.gid)] elif 'system' in kwargs and kwargs['system'] is True: gid = self.get_lowest_available_system_gid() if gid is not False: self.gid = str(gid) cmd += ['-i', str(self.gid)] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) def group_del(self): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'delete'] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) def group_mod(self, gid=None): info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'edit'] if gid is not None: cmd += ['-i', str(gid)] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) return (None, '', '') def get_lowest_available_system_gid(self): # check for lowest available system gid (< 500) try: cmd = [self.module.get_bin_path('dscl', True)] cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID'] (rc, out, err) = self.execute_command(cmd) lines = out.splitlines() highest = 0 for group_info in lines: parts = group_info.split(' ') if len(parts) > 1: gid = int(parts[-1]) if gid > highest and gid < 500: highest = gid if highest == 0 or highest == 499: return False return (highest + 1) except Exception: return False class OpenBsdGroup(Group): """ This is a OpenBSD Group manipulation class. 
This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'OpenBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('groupdel', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] if self.gid is not None: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') if self.gid_min is not None: cmd.append('-K') cmd.append('GID_MIN=' + str(self.gid_min)) if self.gid_max is not None: cmd.append('-K') cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('groupmod', True)] info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class NetBsdGroup(Group): """ This is a NetBSD Group manipulation class. This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'NetBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('groupdel', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] if self.gid is not None: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') if self.gid_min is not None: cmd.append('-K') cmd.append('GID_MIN=' + str(self.gid_min)) if self.gid_max is not None: cmd.append('-K') cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('groupmod', True)] info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class BusyBoxGroup(Group): """ BusyBox group manipulation class for systems that have addgroup and delgroup. 
It overrides the following methods: - group_add() - group_del() - group_mod() """ def group_add(self, **kwargs): cmd = [self.module.get_bin_path('addgroup', True)] if self.gid is not None: cmd.extend(['-g', str(self.gid)]) if self.system: cmd.append('-S') if self.gid_min is not None: cmd.append('-K') cmd.append('GID_MIN=' + str(self.gid_min)) if self.gid_max is not None: cmd.append('-K') cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) def group_del(self): cmd = [self.module.get_bin_path('delgroup', True), self.name] return self.execute_command(cmd) def group_mod(self, **kwargs): # Since there is no groupmod command, modify /etc/group directly info = self.group_info() if self.gid is not None and self.gid != info[2]: with open('/etc/group', 'rb') as f: b_groups = f.read() b_name = to_bytes(self.name) b_current_group_string = b'%s:x:%d:' % (b_name, info[2]) b_new_group_string = b'%s:x:%d:' % (b_name, self.gid) if b':%d:' % self.gid in b_groups: self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid)) if self.module.check_mode: return 0, '', '' b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string) with open('/etc/group', 'wb') as f: f.write(b_new_groups) return 0, '', '' return None, '', '' class AlpineGroup(BusyBoxGroup): platform = 'Linux' distribution = 'Alpine' def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'present']), name=dict(type='str', required=True), force=dict(type='bool', default=False), gid=dict(type='int'), system=dict(type='bool', default=False), local=dict(type='bool', default=False), non_unique=dict(type='bool', default=False), gid_min=dict(type='int'), gid_max=dict(type='int'), ), supports_check_mode=True, required_if=[ ['non_unique', True, ['gid']], ], ) if module.params['force'] and module.params['local']: module.fail_json(msg='force is not a valid option for local, force=True and local=True are mutually exclusive') group = Group(module) module.debug('Group instantiated - platform %s' % group.platform) if group.distribution: module.debug('Group instantiated - distribution %s' % group.distribution) rc = None out = '' err = '' result = {} result['name'] = group.name result['state'] = group.state if group.state == 'absent': if group.group_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = group.group_del() if rc != 0: module.fail_json(name=group.name, msg=err) elif group.state == 'present': if not group.group_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = group.group_add(gid=group.gid, system=group.system) else: (rc, out, err) = group.group_mod(gid=group.gid) if rc is not None and rc != 0: module.fail_json(name=group.name, msg=err) if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err if group.group_exists(): info = group.group_info() result['system'] = group.system result['gid'] = info[2] module.exit_json(**result) if __name__ == '__main__': main()
size: 23,747
language: Python
extension: .py
total_lines: 644
avg_line_length: 27.757764
max_line_length: 141
alphanum_fraction: 0.561733
repo_name: ansible/ansible
repo_stars: 62,258
repo_forks: 23,791
repo_open_issues: 861
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
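group.py's Group.__new__ swaps in a platform-specific subclass via Ansible's get_platform_subclass helper. A stand-in sketch of the same dispatch pattern, matching on platform.system() only (the real helper also consults the distribution, and the real Linux subclass exists to add the -f force flag to groupdel):

```python
import platform

class Group:
    """Generic base; __new__ picks the subclass whose platform matches."""
    platform = 'Generic'

    def __new__(cls, *args, **kwargs):
        chosen = cls
        for subclass in cls.__subclasses__():
            if subclass.platform == platform.system():
                chosen = subclass
        # object.__new__ accepts a different class than the caller,
        # so instantiating Group() yields the platform subclass.
        return super().__new__(chosen)

    def group_del(self, name):
        return ['groupdel', name]

class Linux(Group):
    platform = 'Linux'

    def group_del(self, name):
        return ['groupdel', '-f', name]  # Linux supports forced deletion

g = Group()
print(type(g).__name__, g.group_del('somegroup'))  # Linux subclass on a Linux host
```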
id: 14,203
file_name: mount_facts.py
file_path: ansible_ansible/lib/ansible/modules/mount_facts.py
content:
# -*- coding: utf-8 -*- # Copyright: Contributors to the Ansible project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations DOCUMENTATION = """ --- module: mount_facts version_added: 2.18 short_description: Retrieve mount information. description: - Retrieve information about mounts from preferred sources and filter the results based on the filesystem type and device. options: devices: description: A list of fnmatch patterns to filter mounts by the special device or remote file system. default: ~ type: list elements: str fstypes: description: A list of fnmatch patterns to filter mounts by the type of the file system. default: ~ type: list elements: str sources: description: - A list of sources used to determine the mounts. Missing file sources (or empty files) are skipped. Repeat sources, including symlinks, are skipped. - The C(mount_points) return value contains the first definition found for a mount point. - Additional mounts to the same mount point are available from C(aggregate_mounts) (if enabled). - By default, mounts are retrieved from all of the standard locations, which have the predefined aliases V(all)/V(static)/V(dynamic). - V(all) contains V(dynamic) and V(static). - V(dynamic) contains V(/etc/mtab), V(/proc/mounts), V(/etc/mnttab), and the value of O(mount_binary) if it is not None. This allows platforms like BSD or AIX, which don't have an equivalent to V(/proc/mounts), to collect the current mounts by default. See the O(mount_binary) option to disable the fall back or configure a different executable. - V(static) contains V(/etc/fstab), V(/etc/vfstab), and V(/etc/filesystems). Note that V(/etc/filesystems) is specific to AIX. The Linux file by this name has a different format/purpose and is ignored. - The value of O(mount_binary) can be configured as a source, which will cause it to always execute. Depending on the other sources configured, this could be inefficient/redundant. For example, if V(/proc/mounts) and V(mount) are listed as O(sources), Linux hosts will retrieve the same mounts twice. default: ~ type: list elements: str mount_binary: description: - The O(mount_binary) is used if O(sources) contain the value "mount", or if O(sources) contains a dynamic source, and none were found (as can be expected on BSD or AIX hosts). - Set to V(null) to stop after no dynamic file source is found instead. type: raw default: mount timeout: description: - This is the maximum number of seconds to wait for each mount to complete. When this is V(null), wait indefinitely. - Configure in conjunction with O(on_timeout) to skip unresponsive mounts. - This timeout also applies to the O(mount_binary) command to list mounts. - If the module is configured to run during the play's fact gathering stage, set a timeout using module_defaults to prevent a hang (see example). type: float on_timeout: description: - The action to take when gathering mount information exceeds O(timeout). type: str default: error choices: - error - warn - ignore include_aggregate_mounts: description: - Whether or not the module should return the C(aggregate_mounts) list in C(ansible_facts). - When this is V(null), a warning will be emitted if multiple mounts for the same mount point are found. 
default: ~ type: bool extends_documentation_fragment: - action_common_attributes attributes: check_mode: support: full diff_mode: support: none platform: platforms: posix author: - Ansible Core Team - Sloane Hertel (@s-hertel) """ EXAMPLES = """ - name: Get non-local devices mount_facts: devices: "[!/]*" - name: Get FUSE subtype mounts mount_facts: fstypes: - "fuse.*" - name: Get NFS mounts during gather_facts with timeout hosts: all gather_facts: true vars: ansible_facts_modules: - ansible.builtin.mount_facts module_default: ansible.builtin.mount_facts: timeout: 10 fstypes: - nfs - nfs4 - name: Get mounts from a non-default location mount_facts: sources: - /usr/etc/fstab - name: Get mounts from the mount binary mount_facts: sources: - mount mount_binary: /sbin/mount """ RETURN = """ ansible_facts: description: - An ansible_facts dictionary containing a dictionary of C(mount_points) and list of C(aggregate_mounts) when enabled. - Each key in C(mount_points) is a mount point, and the value contains mount information (similar to C(ansible_facts["mounts"])). Each value also contains the key C(ansible_context), with details about the source and line(s) corresponding to the parsed mount point. - When C(aggregate_mounts) are included, the containing dictionaries are the same format as the C(mount_point) values. returned: on success type: dict sample: mount_points: /proc/sys/fs/binfmt_misc: ansible_context: source: /proc/mounts source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0" block_available: 0 block_size: 4096 block_total: 0 block_used: 0 device: "systemd-1" dump: 0 fstype: "autofs" inode_available: 0 inode_total: 0 inode_used: 0 mount: "/proc/sys/fs/binfmt_misc" options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850" passno: 0 size_available: 0 size_total: 0 uuid: null aggregate_mounts: - ansible_context: source: /proc/mounts source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0" block_available: 0 block_size: 4096 block_total: 0 block_used: 0 device: "systemd-1" dump: 0 fstype: "autofs" inode_available: 0 inode_total: 0 inode_used: 0 mount: "/proc/sys/fs/binfmt_misc" options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850" passno: 0 size_available: 0 size_total: 0 uuid: null - ansible_context: source: /proc/mounts source_data: "binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0" block_available: 0 block_size: 4096 block_total: 0 block_used: 0 device: binfmt_misc dump: 0 fstype: binfmt_misc inode_available: 0 inode_total: 0 inode_used: 0 mount: "/proc/sys/fs/binfmt_misc" options: "rw,nosuid,nodev,noexec,relatime" passno: 0 size_available: 0 size_total: 0 uuid: null """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.facts import timeout as _timeout from ansible.module_utils.facts.utils import get_mount_size, get_file_content from contextlib import suppress from dataclasses import astuple, dataclass from fnmatch import fnmatch import codecs import datetime import functools import os import re import subprocess import typing as t STATIC_SOURCES = ["/etc/fstab", "/etc/vfstab", "/etc/filesystems"] DYNAMIC_SOURCES = ["/etc/mtab", "/proc/mounts", "/etc/mnttab"] # AIX and BSD don't have a file-based dynamic source, so the module also supports running a mount binary to collect these. 
# Pattern for Linux, including OpenBSD and NetBSD LINUX_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) type (?P<fstype>\S+) \((?P<options>.+)\)$") # Pattern for other BSD including FreeBSD, DragonFlyBSD, and MacOS BSD_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) \((?P<fstype>.+)\)$") # Pattern for AIX, example in https://www.ibm.com/docs/en/aix/7.2?topic=m-mount-command AIX_MOUNT_RE = re.compile(r"^(?P<node>\S*)\s+(?P<mounted>\S+)\s+(?P<mount>\S+)\s+(?P<fstype>\S+)\s+(?P<time>\S+\s+\d+\s+\d+:\d+)\s+(?P<options>.*)$") @dataclass class MountInfo: mount_point: str line: str fields: dict[str, str | int] @dataclass class MountInfoOptions: mount_point: str line: str fields: dict[str, str | dict[str, str]] def replace_octal_escapes(value: str) -> str: return re.sub(r"(\\[0-7]{3})", lambda m: codecs.decode(m.group(0), "unicode_escape"), value) @functools.lru_cache(maxsize=None) def get_device_by_uuid(module: AnsibleModule, uuid : str) -> str | None: """Get device information by UUID.""" blkid_output = None if (blkid_binary := module.get_bin_path("blkid")): cmd = [blkid_binary, "--uuid", uuid] with suppress(subprocess.CalledProcessError): blkid_output = handle_timeout(module)(subprocess.check_output)(cmd, text=True, timeout=module.params["timeout"]) return blkid_output @functools.lru_cache(maxsize=None) def list_uuids_linux() -> list[str]: """List UUIDs from the system.""" with suppress(OSError): return os.listdir("/dev/disk/by-uuid") return [] @functools.lru_cache(maxsize=None) def run_lsblk(module : AnsibleModule) -> list[list[str]]: """Return device, UUID pairs from lsblk.""" lsblk_output = "" if (lsblk_binary := module.get_bin_path("lsblk")): cmd = [lsblk_binary, "--list", "--noheadings", "--paths", "--output", "NAME,UUID", "--exclude", "2"] lsblk_output = subprocess.check_output(cmd, text=True, timeout=module.params["timeout"]) return [line.split() for line in lsblk_output.splitlines() if len(line.split()) == 2] @functools.lru_cache(maxsize=None) def get_udevadm_device_uuid(module : AnsibleModule, device : str) -> str | None: """Fallback to get the device's UUID for lsblk <= 2.23 which doesn't have the --paths option.""" udevadm_output = "" if (udevadm_binary := module.get_bin_path("udevadm")): cmd = [udevadm_binary, "info", "--query", "property", "--name", device] udevadm_output = subprocess.check_output(cmd, text=True, timeout=module.params["timeout"]) uuid = None for line in udevadm_output.splitlines(): # a snippet of the output of the udevadm command below will be: # ... # ID_FS_TYPE=ext4 # ID_FS_USAGE=filesystem # ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179 # ... 
        if line.startswith("ID_FS_UUID="):
            uuid = line.split("=", 1)[1]
            break
    return uuid


def get_partition_uuid(module: AnsibleModule, partname: str) -> str | None:
    """Get the UUID of a partition by its name."""
    # TODO: NetBSD and FreeBSD can have UUIDs in /etc/fstab,
    # but none of these methods work (mount always displays the label though)
    for uuid in list_uuids_linux():
        dev = os.path.realpath(os.path.join("/dev/disk/by-uuid", uuid))
        if partname == dev:
            return uuid
    for dev, uuid in handle_timeout(module, default=[])(run_lsblk)(module):
        if partname == dev:
            return uuid
    return handle_timeout(module)(get_udevadm_device_uuid)(module, partname)


def handle_timeout(module, default=None):
    """Decorator to catch timeout exceptions and handle failing, warning, and ignoring the timeout."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except (subprocess.TimeoutExpired, _timeout.TimeoutError) as e:
                if module.params["on_timeout"] == "error":
                    module.fail_json(msg=str(e))
                elif module.params["on_timeout"] == "warn":
                    module.warn(str(e))
                return default
        return wrapper
    return decorator


def run_mount_bin(module: AnsibleModule, mount_bin: str) -> str:  # type: ignore  # Missing return statement
    """Execute the specified mount binary with optional timeout."""
    mount_bin = module.get_bin_path(mount_bin, required=True)
    try:
        return handle_timeout(module, default="")(subprocess.check_output)(
            mount_bin, text=True, timeout=module.params["timeout"]
        )
    except subprocess.CalledProcessError as e:
        module.fail_json(msg=f"Failed to execute {mount_bin}: {str(e)}")


def get_mount_pattern(stdout: str):
    lines = stdout.splitlines()
    pattern = None
    if all(LINUX_MOUNT_RE.match(line) for line in lines):
        pattern = LINUX_MOUNT_RE
    elif all(BSD_MOUNT_RE.match(line) for line in lines if not line.startswith("map ")):
        pattern = BSD_MOUNT_RE
    elif len(lines) > 2 and all(AIX_MOUNT_RE.match(line) for line in lines[2:]):
        pattern = AIX_MOUNT_RE
    return pattern


def gen_mounts_from_stdout(stdout: str) -> t.Iterable[MountInfo]:
    """List mount dictionaries from mount stdout."""
    if not (pattern := get_mount_pattern(stdout)):
        stdout = ""

    for line in stdout.splitlines():
        if not (match := pattern.match(line)):
            # AIX has a couple header lines for some reason
            # MacOS "map" lines are skipped (e.g. "map auto_home on /System/Volumes/Data/home (autofs, automounted, nobrowse)")
            # TODO: include MacOS lines
            continue

        mount = match.groupdict()["mount"]
        if pattern is LINUX_MOUNT_RE:
            mount_info = match.groupdict()
        elif pattern is BSD_MOUNT_RE:
            # the group containing fstype is comma separated, and may include whitespace
            mount_info = match.groupdict()
            parts = re.split(r"\s*,\s*", match.group("fstype"), maxsplit=1)
            if len(parts) == 1:
                mount_info["fstype"] = parts[0]
            else:
                mount_info.update({"fstype": parts[0], "options": parts[1]})
        elif pattern is AIX_MOUNT_RE:
            mount_info = match.groupdict()
            device = mount_info.pop("mounted")
            node = mount_info.pop("node")
            if device and node:
                device = f"{node}:{device}"
            mount_info["device"] = device

        yield MountInfo(mount, line, mount_info)


def gen_fstab_entries(lines: list[str]) -> t.Iterable[MountInfo]:
    """Yield tuples from /etc/fstab https://man7.org/linux/man-pages/man5/fstab.5.html.

    Each tuple contains the mount point, line of origin, and the dictionary of the parsed line.
    """
    for line in lines:
        if not (line := line.strip()) or line.startswith("#"):
            continue
        fields = [replace_octal_escapes(field) for field in line.split()]
        mount_info: dict[str, str | int] = {
            "device": fields[0],
            "mount": fields[1],
            "fstype": fields[2],
            "options": fields[3],
        }
        with suppress(IndexError):
            # the last two fields are optional
            mount_info["dump"] = int(fields[4])
            mount_info["passno"] = int(fields[5])
        yield MountInfo(fields[1], line, mount_info)


def gen_vfstab_entries(lines: list[str]) -> t.Iterable[MountInfo]:
    """Yield tuples from /etc/vfstab https://docs.oracle.com/cd/E36784_01/html/E36882/vfstab-4.html.

    Each tuple contains the mount point, line of origin, and the dictionary of the parsed line.
    """
    for line in lines:
        if not line.strip() or line.strip().startswith("#"):
            continue
        fields = line.split()
        passno: str | int = fields[4]
        with suppress(ValueError):
            passno = int(passno)
        mount_info: dict[str, str | int] = {
            "device": fields[0],
            "device_to_fsck": fields[1],
            "mount": fields[2],
            "fstype": fields[3],
            "passno": passno,
            "mount_at_boot": fields[5],
            "options": fields[6],
        }
        yield MountInfo(fields[2], line, mount_info)


def list_aix_filesystems_stanzas(lines: list[str]) -> list[list[str]]:
    """Parse stanzas from /etc/filesystems according to https://www.ibm.com/docs/hu/aix/7.2?topic=files-filesystems-file."""
    stanzas = []
    for line in lines:
        if line.startswith("*") or not line.strip():
            continue
        if line.rstrip().endswith(":"):
            stanzas.append([line])
        else:
            if "=" not in line:
                # Expected for Linux, return an empty list since this doesn't appear to be AIX /etc/filesystems
                stanzas = []
                break
            stanzas[-1].append(line)
    return stanzas


def gen_aix_filesystems_entries(lines: list[str]) -> t.Iterable[MountInfoOptions]:
    """Yield tuples from /etc/filesystems https://www.ibm.com/docs/hu/aix/7.2?topic=files-filesystems-file.

    Each tuple contains the mount point, lines of origin, and the dictionary of the parsed lines.
    """
    for stanza in list_aix_filesystems_stanzas(lines):
        original = "\n".join(stanza)
        mount = stanza.pop(0)[:-1]  # snip trailing :
        mount_info: dict[str, str] = {}
        for line in stanza:
            attr, value = line.split("=", 1)
            mount_info[attr.strip()] = value.strip()

        device = ""
        if (nodename := mount_info.get("nodename")):
            device = nodename
        if (dev := mount_info.get("dev")):
            if device:
                device += ":"
            device += dev

        normalized_fields: dict[str, str | dict[str, str]] = {
            "mount": mount,
            "device": device or "unknown",
            "fstype": mount_info.get("vfs") or "unknown",
            # avoid clobbering the mount point with the AIX mount option "mount"
            "attributes": mount_info,
        }
        yield MountInfoOptions(mount, original, normalized_fields)


def gen_mnttab_entries(lines: list[str]) -> t.Iterable[MountInfo]:
    """Yield tuples from /etc/mnttab columns https://docs.oracle.com/cd/E36784_01/html/E36882/mnttab-4.html.

    Each tuple contains the mount point, line of origin, and the dictionary of the parsed line.
    """
    if not any(len(fields[4]) == 10 for line in lines for fields in [line.split()]):
        raise ValueError
    for line in lines:
        fields = line.split()
        datetime.date.fromtimestamp(int(fields[4]))
        mount_info: dict[str, str | int] = {
            "device": fields[0],
            "mount": fields[1],
            "fstype": fields[2],
            "options": fields[3],
            "time": int(fields[4]),
        }
        yield MountInfo(fields[1], line, mount_info)


def gen_mounts_by_file(file: str) -> t.Iterable[MountInfo | MountInfoOptions]:
    """Yield parsed mount entries from the first successful generator.

    Generators are tried in the following order to minimize false positives:
    - /etc/vfstab: 7 columns
    - /etc/mnttab: 5 columns (mnttab[4] must contain a UNIX timestamp)
    - /etc/fstab: 4-6 columns (fstab[4] is optional and historically 0-9, but can be any int)
    - /etc/filesystems: multi-line, not column-based, and specific to AIX
    """
    if (lines := get_file_content(file, "").splitlines()):
        for gen_mounts in [gen_vfstab_entries, gen_mnttab_entries, gen_fstab_entries, gen_aix_filesystems_entries]:
            with suppress(IndexError, ValueError):
                # mypy error: misc: Incompatible types in "yield from" (actual type "object", expected type "Union[MountInfo, MountInfoOptions]")
                # only works if either
                # * the list of functions excludes gen_aix_filesystems_entries
                # * the list of functions only contains gen_aix_filesystems_entries
                yield from list(gen_mounts(lines))  # type: ignore[misc]
                break


def get_sources(module: AnsibleModule) -> list[str]:
    """Return a list of filenames from the requested sources."""
    sources: list[str] = []
    for source in module.params["sources"] or ["all"]:
        if not source:
            module.fail_json(msg="sources contains an empty string")

        if source in {"dynamic", "all"}:
            sources.extend(DYNAMIC_SOURCES)
        if source in {"static", "all"}:
            sources.extend(STATIC_SOURCES)
        elif source not in {"static", "dynamic", "all"}:
            sources.append(source)
    return sources


def gen_mounts_by_source(module: AnsibleModule):
    """Iterate over the sources and yield tuples containing the source, mount point, source line(s), and the parsed result."""
    sources = get_sources(module)
    if len(set(sources)) < len(sources):
        module.warn(f"mount_facts option 'sources' contains duplicate entries, repeat sources will be ignored: {sources}")

    mount_fallback = module.params["mount_binary"] and set(sources).intersection(DYNAMIC_SOURCES)

    seen = set()
    for source in sources:
        if source in seen or (real_source := os.path.realpath(source)) in seen:
            continue

        if source == "mount":
            seen.add(source)
            stdout = run_mount_bin(module, module.params["mount_binary"])
            results = [(source, *astuple(mount_info)) for mount_info in gen_mounts_from_stdout(stdout)]
        else:
            seen.add(real_source)
            results = [(source, *astuple(mount_info)) for mount_info in gen_mounts_by_file(source)]

        if results and source in ("mount", *DYNAMIC_SOURCES):
            mount_fallback = False

        yield from results

    if mount_fallback:
        stdout = run_mount_bin(module, module.params["mount_binary"])
        yield from [("mount", *astuple(mount_info)) for mount_info in gen_mounts_from_stdout(stdout)]


def get_mount_facts(module: AnsibleModule):
    """List and filter mounts, returning all mounts for each unique source."""
    seconds = module.params["timeout"]
    mounts = []
    for source, mount, origin, fields in gen_mounts_by_source(module):
        device = fields["device"]
        fstype = fields["fstype"]

        # Convert UUIDs in Linux /etc/fstab to device paths
        # TODO: need similar for OpenBSD, which lists UUIDs (without the UUID= prefix) in /etc/fstab, needs another approach though.
        uuid = None
        if device.startswith("UUID="):
            uuid = device.split("=", 1)[1]
            device = get_device_by_uuid(module, uuid) or device

        if not any(fnmatch(device, pattern) for pattern in module.params["devices"] or ["*"]):
            continue
        if not any(fnmatch(fstype, pattern) for pattern in module.params["fstypes"] or ["*"]):
            continue

        timed_func = _timeout.timeout(seconds, f"Timed out getting mount size for mount {mount} (type {fstype})")(get_mount_size)
        if mount_size := handle_timeout(module)(timed_func)(mount):
            fields.update(mount_size)

        if uuid is None:
            with suppress(subprocess.CalledProcessError):
                uuid = get_partition_uuid(module, device)

        fields.update({"ansible_context": {"source": source, "source_data": origin}, "uuid": uuid})
        mounts.append(fields)

    return mounts


def handle_deduplication(module, mounts):
    """Return the unique mount points from the complete list of mounts, and handle the optional aggregate results."""
    mount_points = {}
    mounts_by_source = {}
    for mount in mounts:
        mount_point = mount["mount"]
        source = mount["ansible_context"]["source"]
        if mount_point not in mount_points:
            mount_points[mount_point] = mount
        mounts_by_source.setdefault(source, []).append(mount_point)

    duplicates_by_src = {src: mnts for src, mnts in mounts_by_source.items() if len(set(mnts)) != len(mnts)}
    if duplicates_by_src and module.params["include_aggregate_mounts"] is None:
        duplicates_str = ", ".join([f"{src} ({duplicates})" for src, duplicates in duplicates_by_src.items()])
        module.warn(f"mount_facts: ignoring repeat mounts in the following sources: {duplicates_str}. "
                    "You can disable this warning by configuring the 'include_aggregate_mounts' option as True or False.")

    if module.params["include_aggregate_mounts"]:
        aggregate_mounts = mounts
    else:
        aggregate_mounts = []

    return mount_points, aggregate_mounts


def get_argument_spec():
    """Helper returning the argument spec."""
    return dict(
        sources=dict(type="list", elements="str", default=None),
        mount_binary=dict(default="mount", type="raw"),
        devices=dict(type="list", elements="str", default=None),
        fstypes=dict(type="list", elements="str", default=None),
        timeout=dict(type="float"),
        on_timeout=dict(choices=["error", "warn", "ignore"], default="error"),
        include_aggregate_mounts=dict(default=None, type="bool"),
    )


def main():
    module = AnsibleModule(
        argument_spec=get_argument_spec(),
        supports_check_mode=True,
    )
    if (seconds := module.params["timeout"]) is not None and seconds <= 0:
        module.fail_json(msg=f"argument 'timeout' must be a positive number or null, not {seconds}")
    if (mount_binary := module.params["mount_binary"]) is not None and not isinstance(mount_binary, str):
        module.fail_json(msg=f"argument 'mount_binary' must be a string or null, not {mount_binary}")

    mounts = get_mount_facts(module)
    mount_points, aggregate_mounts = handle_deduplication(module, mounts)
    module.exit_json(ansible_facts={"mount_points": mount_points, "aggregate_mounts": aggregate_mounts})


if __name__ == "__main__":
    main()
26,009
Python
.py
557
38.962298
155
0.64595
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
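The handle_timeout decorator above drives the module's "error"/"warn"/"ignore" timeout policy. A minimal standalone sketch of the same pattern, with the policy passed as an argument instead of read from module.params (no AnsibleModule here; print stands in for module.warn, and slow_command is a made-up example, not part of the module):

import functools
import subprocess


def handle_timeout(on_timeout="warn", default=None):
    """Apply an error/warn/ignore timeout policy to a callable."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except subprocess.TimeoutExpired as e:
                if on_timeout == "error":
                    raise
                if on_timeout == "warn":
                    print(f"warning: {e}")
                return default
        return wrapper
    return decorator


@handle_timeout(on_timeout="warn", default="")
def slow_command():
    # 'sleep 5' with a 1 second limit is guaranteed to time out
    return subprocess.check_output(["sleep", "5"], text=True, timeout=1)


print(repr(slow_command()))  # warns, then prints '' (the default)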
14,204
yumdnf.py
ansible_ansible/lib/ansible/module_utils/yumdnf.py
# -*- coding: utf-8 -*- # # # Copyright: (c) 2012, Red Hat, Inc # Written by Seth Vidal <skvidal at fedoraproject.org> # Contributing Authors: # - Ansible Core Team # - Eduard Snesarev (@verm666) # - Berend De Schouwer (@berenddeschouwer) # - Abhijeet Kasurde (@Akasurde) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations from abc import ABCMeta, abstractmethod yumdnf_argument_spec = dict( argument_spec=dict( allow_downgrade=dict(type='bool', default=False), allowerasing=dict(default=False, type="bool"), autoremove=dict(type='bool', default=False), best=dict(type="bool"), bugfix=dict(required=False, type='bool', default=False), cacheonly=dict(type='bool', default=False), conf_file=dict(type='str'), disable_excludes=dict(type='str', default=None), disable_gpg_check=dict(type='bool', default=False), disable_plugin=dict(type='list', elements='str', default=[]), disablerepo=dict(type='list', elements='str', default=[]), download_only=dict(type='bool', default=False), download_dir=dict(type='str', default=None), enable_plugin=dict(type='list', elements='str', default=[]), enablerepo=dict(type='list', elements='str', default=[]), exclude=dict(type='list', elements='str', default=[]), installroot=dict(type='str', default="/"), install_repoquery=dict( type='bool', default=True, removed_in_version='2.20', removed_from_collection='ansible.builtin', ), install_weak_deps=dict(type='bool', default=True), list=dict(type='str'), name=dict(type='list', elements='str', aliases=['pkg'], default=[]), nobest=dict(type="bool"), releasever=dict(default=None), security=dict(type='bool', default=False), skip_broken=dict(type='bool', default=False), # removed==absent, installed==present, these are accepted as aliases state=dict(type='str', default=None, choices=['absent', 'installed', 'latest', 'present', 'removed']), update_cache=dict(type='bool', default=False, aliases=['expire-cache']), update_only=dict(required=False, default="no", type='bool'), validate_certs=dict(type='bool', default=True), sslverify=dict(type='bool', default=True), lock_timeout=dict(type='int', default=30), ), required_one_of=[['name', 'list', 'update_cache']], mutually_exclusive=[['name', 'list'], ['best', 'nobest']], supports_check_mode=True, ) class YumDnf(metaclass=ABCMeta): """ Abstract class that handles the population of instance variables that should be identical between both YUM and DNF modules because of the feature parity and shared argument spec """ def __init__(self, module): self.module = module self.allow_downgrade = self.module.params['allow_downgrade'] self.allowerasing = self.module.params['allowerasing'] self.autoremove = self.module.params['autoremove'] self.best = self.module.params['best'] self.bugfix = self.module.params['bugfix'] self.cacheonly = self.module.params['cacheonly'] self.conf_file = self.module.params['conf_file'] self.disable_excludes = self.module.params['disable_excludes'] self.disable_gpg_check = self.module.params['disable_gpg_check'] self.disable_plugin = self.module.params['disable_plugin'] self.disablerepo = self.module.params.get('disablerepo', []) self.download_only = self.module.params['download_only'] self.download_dir = self.module.params['download_dir'] self.enable_plugin = self.module.params['enable_plugin'] self.enablerepo = self.module.params.get('enablerepo', []) self.exclude = self.module.params['exclude'] self.installroot = self.module.params['installroot'] self.install_repoquery = 
self.module.params['install_repoquery'] self.install_weak_deps = self.module.params['install_weak_deps'] self.list = self.module.params['list'] self.names = [p.strip() for p in self.module.params['name']] self.nobest = self.module.params['nobest'] self.releasever = self.module.params['releasever'] self.security = self.module.params['security'] self.skip_broken = self.module.params['skip_broken'] self.state = self.module.params['state'] self.update_only = self.module.params['update_only'] self.update_cache = self.module.params['update_cache'] self.validate_certs = self.module.params['validate_certs'] self.sslverify = self.module.params['sslverify'] self.lock_timeout = self.module.params['lock_timeout'] # It's possible someone passed a comma separated string since it used # to be a string type, so we should handle that self.names = self.listify_comma_sep_strings_in_list(self.names) self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo) self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo) self.exclude = self.listify_comma_sep_strings_in_list(self.exclude) # Fail if someone passed a space separated string # https://github.com/ansible/ansible/issues/46301 for name in self.names: if ' ' in name and not any(spec in name for spec in ['@', '>', '<', '=']): module.fail_json( msg='It appears that a space separated string of packages was passed in ' 'as an argument. To operate on several packages, pass a comma separated ' 'string of packages or a list of packages.' ) # Sanity checking for autoremove if self.state is None: if self.autoremove: self.state = "absent" else: self.state = "present" if self.autoremove and (self.state != "absent"): self.module.fail_json( msg="Autoremove should be used alone or with state=absent", results=[], ) def listify_comma_sep_strings_in_list(self, some_list): """ method to accept a list of strings as the parameter, find any strings in that list that are comma separated, remove them from the list and add their comma separated elements to the original list """ new_list = [] remove_from_original_list = [] for element in some_list: if ',' in element: remove_from_original_list.append(element) new_list.extend([e.strip() for e in element.split(',')]) for element in remove_from_original_list: some_list.remove(element) some_list.extend(new_list) if some_list == [""]: return [] return some_list @abstractmethod def run(self): raise NotImplementedError
7,091
Python
.py
140
41.721429
110
0.638685
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
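The comma-splitting normalization performed by listify_comma_sep_strings_in_list is easiest to see in isolation. A standalone sketch mirroring the method body; note that expanded elements land at the end of the list, exactly as in the original:

def listify_comma_sep_strings(some_list):
    """Expand comma separated entries in place, mirroring YumDnf's helper."""
    new_list = []
    remove_from_original = []
    for element in some_list:
        if "," in element:
            remove_from_original.append(element)
            new_list.extend(e.strip() for e in element.split(","))
    for element in remove_from_original:
        some_list.remove(element)
    some_list.extend(new_list)
    if some_list == [""]:
        return []
    return some_list


print(listify_comma_sep_strings(["httpd", "vim, git", "curl"]))
# -> ['httpd', 'curl', 'vim', 'git']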
14,205
errors.py
ansible_ansible/lib/ansible/module_utils/errors.py
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations


class AnsibleFallbackNotFound(Exception):
    """Fallback validator was not found"""


class AnsibleValidationError(Exception):
    """Single argument spec validation error"""

    def __init__(self, message):
        super(AnsibleValidationError, self).__init__(message)
        self.error_message = message
        """The error message passed in when the exception was raised."""

    @property
    def msg(self):
        """The error message passed in when the exception was raised."""
        return self.args[0]


class AnsibleValidationErrorMultiple(AnsibleValidationError):
    """Multiple argument spec validation errors"""

    def __init__(self, errors=None):
        self.errors = errors[:] if errors else []
        """:class:`list` of :class:`AnsibleValidationError` objects"""

    def __getitem__(self, key):
        return self.errors[key]

    def __setitem__(self, key, value):
        self.errors[key] = value

    def __delitem__(self, key):
        del self.errors[key]

    @property
    def msg(self):
        """The first message from the first error in ``errors``."""
        return self.errors[0].args[0]

    @property
    def messages(self):
        """:class:`list` of each error message in ``errors``."""
        return [err.msg for err in self.errors]

    def append(self, error):
        """Append a new error to ``self.errors``.

        Only :class:`AnsibleValidationError` should be added.
        """
        self.errors.append(error)

    def extend(self, errors):
        """Append each item in ``errors`` to ``self.errors``. Only :class:`AnsibleValidationError` should be added."""
        self.errors.extend(errors)


class AliasError(AnsibleValidationError):
    """Error handling aliases"""


class ArgumentTypeError(AnsibleValidationError):
    """Error with parameter type"""


class ArgumentValueError(AnsibleValidationError):
    """Error with parameter value"""


class DeprecationError(AnsibleValidationError):
    """Error processing parameter deprecations"""


class ElementError(AnsibleValidationError):
    """Error when validating elements"""


class MutuallyExclusiveError(AnsibleValidationError):
    """Mutually exclusive parameters were supplied"""


class NoLogError(AnsibleValidationError):
    """Error converting no_log values"""


class RequiredByError(AnsibleValidationError):
    """Error with parameters that are required by other parameters"""


class RequiredDefaultError(AnsibleValidationError):
    """A required parameter was assigned a default value"""


class RequiredError(AnsibleValidationError):
    """Missing a required parameter"""


class RequiredIfError(AnsibleValidationError):
    """Error with conditionally required parameters"""


class RequiredOneOfError(AnsibleValidationError):
    """Error with parameters where at least one is required"""


class RequiredTogetherError(AnsibleValidationError):
    """Error with parameters that are required together"""


class SubParameterTypeError(AnsibleValidationError):
    """Incorrect type for subparameter"""


class UnsupportedError(AnsibleValidationError):
    """Unsupported parameters were supplied"""
3,345
Python
.py
73
40.260274
118
0.724791
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
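A short usage sketch for the classes above, assuming ansible is importable; the two error messages are invented for illustration:

from ansible.module_utils.errors import (
    AnsibleValidationErrorMultiple,
    ArgumentTypeError,
    RequiredError,
)

errors = AnsibleValidationErrorMultiple()
errors.append(RequiredError("missing required arguments: name"))
errors.append(ArgumentTypeError("argument 'timeout' could not be converted to int"))

print(errors.msg)       # first collected message only
print(errors.messages)  # all collected messages

try:
    raise errors  # the aggregate is itself an exception
except AnsibleValidationErrorMultiple as e:
    print(f"{len(e.errors)} validation errors")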
14,206
ansible_release.py
ansible_ansible/lib/ansible/module_utils/ansible_release.py
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__version__ = '2.19.0.dev0'
__author__ = 'Ansible, Inc.'
__codename__ = "What Is and What Should Never Be"
857
Python
.py
20
41.75
70
0.756886
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,207
splitter.py
ansible_ansible/lib/ansible/module_utils/splitter.py
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations def _get_quote_state(token, quote_char): """ the goal of this block is to determine if the quoted string is unterminated in which case it needs to be put back together """ # the char before the current one, used to see if # the current character is escaped prev_char = None for idx, cur_char in enumerate(token): if idx > 0: prev_char = token[idx - 1] if cur_char in '"\'' and prev_char != '\\': if quote_char: if cur_char == quote_char: quote_char = None else: quote_char = cur_char return quote_char def _count_jinja2_blocks(token, cur_depth, open_token, close_token): """ this function counts the number of opening/closing blocks for a given opening/closing type and adjusts the current depth for that block based on the difference """ num_open = token.count(open_token) num_close = token.count(close_token) if num_open != num_close: cur_depth += (num_open - num_close) if cur_depth < 0: cur_depth = 0 return cur_depth def split_args(args): """ Splits args on whitespace, but intelligently reassembles those that may have been split over a jinja2 block or quotes. When used in a remote module, we won't ever have to be concerned about jinja2 blocks, however this function is/will be used in the core portions as well before the args are templated. example input: a=b c="foo bar" example output: ['a=b', 'c="foo bar"'] Basically this is a variation shlex that has some more intelligence for how Ansible needs to use it. 
""" # the list of params parsed out of the arg string # this is going to be the result value when we are done params = [] # here we encode the args, so we have a uniform charset to # work with, and split on white space args = args.strip() try: args = args.encode('utf-8') do_decode = True except UnicodeDecodeError: do_decode = False items = args.split('\n') # iterate over the tokens, and reassemble any that may have been # split on a space inside a jinja2 block. # ex if tokens are "{{", "foo", "}}" these go together # These variables are used # to keep track of the state of the parsing, since blocks and quotes # may be nested within each other. quote_char = None inside_quotes = False print_depth = 0 # used to count nested jinja2 {{ }} blocks block_depth = 0 # used to count nested jinja2 {% %} blocks comment_depth = 0 # used to count nested jinja2 {# #} blocks # now we loop over each split chunk, coalescing tokens if the white space # split occurred within quotes or a jinja2 block of some kind for itemidx, item in enumerate(items): # we split on spaces and newlines separately, so that we # can tell which character we split on for reassembly # inside quotation characters tokens = item.strip().split(' ') line_continuation = False for idx, token in enumerate(tokens): # if we hit a line continuation character, but # we're not inside quotes, ignore it and continue # on to the next token while setting a flag if token == '\\' and not inside_quotes: line_continuation = True continue # store the previous quoting state for checking later was_inside_quotes = inside_quotes quote_char = _get_quote_state(token, quote_char) inside_quotes = quote_char is not None # multiple conditions may append a token to the list of params, # so we keep track with this flag to make sure it only happens once # append means add to the end of the list, don't append means concatenate # it to the end of the last token appended = False # if we're inside quotes now, but weren't before, append the token # to the end of the list, since we'll tack on more to it later # otherwise, if we're inside any jinja2 block, inside quotes, or we were # inside quotes (but aren't now) concat this token to the last param if inside_quotes and not was_inside_quotes: params.append(token) appended = True elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes: if idx == 0 and not inside_quotes and was_inside_quotes: params[-1] = "%s%s" % (params[-1], token) elif len(tokens) > 1: spacer = '' if idx > 0: spacer = ' ' params[-1] = "%s%s%s" % (params[-1], spacer, token) else: spacer = '' if not params[-1].endswith('\n') and idx == 0: spacer = '\n' params[-1] = "%s%s%s" % (params[-1], spacer, token) appended = True # if the number of paired block tags is not the same, the depth has changed, so we calculate that here # and may append the current token to the params (if we haven't previously done so) prev_print_depth = print_depth print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}") if print_depth != prev_print_depth and not appended: params.append(token) appended = True prev_block_depth = block_depth block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}") if block_depth != prev_block_depth and not appended: params.append(token) appended = True prev_comment_depth = comment_depth comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}") if comment_depth != prev_comment_depth and not appended: params.append(token) appended = True # finally, if we're at zero depth for all blocks and not 
inside quotes, and have not # yet appended anything to the list of params, we do so now if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '': params.append(token) # if this was the last token in the list, and we have more than # one item (meaning we split on newlines), add a newline back here # to preserve the original structure if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation: if not params[-1].endswith('\n') or item == '': params[-1] += '\n' # always clear the line continuation flag line_continuation = False # If we're done and things are not at zero depth or we're still inside quotes, # raise an error to indicate that the args were unbalanced if print_depth or block_depth or comment_depth or inside_quotes: raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes") # finally, we decode each param back to the unicode it was in the arg string if do_decode: params = [x.decode('utf-8') for x in params] return params def is_quoted(data): return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'") def unquote(data): """ removes first and last quotes from a string, if the string starts and ends with the same quotes """ if is_quoted(data): return data[1:-1] return data
9,468
Python
.py
183
43.027322
120
0.644432
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
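A usage sketch for split_args and unquote, assuming the Python 3 cleanup applied to split_args above; the first expected output comes from the function's own docstring, the second from tracing the jinja2 reassembly:

from ansible.module_utils.splitter import split_args, unquote

print(split_args('a=b c="foo bar"'))
# ['a=b', 'c="foo bar"']

print(split_args('state=present name={{ item }}'))
# ['state=present', 'name={{ item }}']  -- the {{ }} block is reassembled

print(unquote('"foo bar"'))
# foo bar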
14,208
json_utils.py
ansible_ansible/lib/ansible/module_utils/json_utils.py
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

from __future__ import annotations

import json  # pylint: disable=unused-import


# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
# changes are propagated there.
def _filter_non_json_lines(data, objects_only=False):
    """
    Used to filter unrelated output around module JSON output, like messages from
    tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

    Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
    trailing lines after matching close character (working from the bottom of output).
    """
    warnings = []

    # Filter initial junk
    lines = data.splitlines()

    for start, line in enumerate(lines):
        line = line.strip()
        if line.startswith(u'{'):
            endchar = u'}'
            break
        elif not objects_only and line.startswith(u'['):
            endchar = u']'
            break
    else:
        raise ValueError('No start of json char found')

    # Filter trailing junk
    lines = lines[start:]

    for reverse_end_offset, line in enumerate(reversed(lines)):
        if line.strip().endswith(endchar):
            break
    else:
        raise ValueError('No end of json char found')

    if reverse_end_offset > 0:
        # Trailing junk is uncommon and can point to things the user might
        # want to change. So print a warning if we find any
        trailing_junk = lines[len(lines) - reverse_end_offset:]
        for line in trailing_junk:
            if line.strip():
                warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
                break

    lines = lines[:(len(lines) - reverse_end_offset)]

    return ('\n'.join(lines), warnings)
3,403
Python
.py
66
46.5
112
0.719398
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
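A usage sketch of _filter_non_json_lines, assuming ansible is importable; the noisy transcript is invented to mimic the dropbear/MOTD case the docstring mentions:

import json

from ansible.module_utils.json_utils import _filter_non_json_lines

raw = 'MOTD spewed by dropbear\n{"changed": false, "msg": "ok"}\ntrailing noise'
cleaned, warnings = _filter_non_json_lines(raw)
print(json.loads(cleaned))  # {'changed': False, 'msg': 'ok'}
print(warnings)             # one warning about the trailing junk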
14,209
urls.py
ansible_ansible/lib/ansible/module_utils/urls.py
# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com>, 2015 # Copyright: Contributors to the Ansible project # # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) """ The **urls** utils module offers a replacement for the urllib python library. urllib is the python stdlib way to retrieve files from the Internet but it lacks some security features (around verifying SSL certificates) that users should care about in most situations. Using the functions in this module corrects deficiencies in the urllib module wherever possible. There are also third-party libraries (for instance, requests) which can be used to replace urllib with a more secure library. However, all third party libraries require that the library be installed on the managed machine. That is an extra step for users making use of a module. If possible, avoid third party libraries by using this code instead. """ from __future__ import annotations import base64 import email.mime.application import email.mime.multipart import email.mime.nonmultipart import email.parser import email.policy import email.utils import http.client import mimetypes import netrc import os import platform import re import socket import tempfile import traceback import types # pylint: disable=unused-import import urllib.error import urllib.request from contextlib import contextmanager from http import cookiejar from urllib.parse import unquote, urlparse, urlunparse from urllib.request import BaseHandler try: import gzip HAS_GZIP = True GZIP_IMP_ERR = None except ImportError: HAS_GZIP = False GZIP_IMP_ERR = traceback.format_exc() GzipFile = object else: GzipFile = gzip.GzipFile # type: ignore[assignment,misc] from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.collections import Mapping, is_sequence from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text try: import ssl HAS_SSL = True except Exception: HAS_SSL = False HAS_CRYPTOGRAPHY = True try: from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.exceptions import UnsupportedAlgorithm except ImportError: HAS_CRYPTOGRAPHY = False # Old import for GSSAPI authentication, this is not used in urls.py but kept for backwards compatibility. try: import urllib_gssapi # pylint: disable=unused-import HAS_GSSAPI = True except ImportError: HAS_GSSAPI = False GSSAPI_IMP_ERR = None try: import gssapi class HTTPGSSAPIAuthHandler(BaseHandler): """ Handles Negotiate/Kerberos support through the gssapi library. 
""" AUTH_HEADER_PATTERN = re.compile(r'(?:.*)\s*(Negotiate|Kerberos)\s*([^,]*),?', re.I) handler_order = 480 # Handle before Digest authentication def __init__(self, username=None, password=None): self.username = username self.password = password self._context = None def get_auth_value(self, headers): auth_match = self.AUTH_HEADER_PATTERN.search(headers.get('www-authenticate', '')) if auth_match: return auth_match.group(1), base64.b64decode(auth_match.group(2)) def http_error_401(self, req, fp, code, msg, headers): # If we've already attempted the auth and we've reached this again then there was a failure. if self._context: return parsed = urlparse(req.get_full_url()) auth_header = self.get_auth_value(headers) if not auth_header: return auth_protocol, in_token = auth_header username = None if self.username: username = gssapi.Name(self.username, name_type=gssapi.NameType.user) if username and self.password: if not hasattr(gssapi.raw, 'acquire_cred_with_password'): raise NotImplementedError("Platform GSSAPI library does not support " "gss_acquire_cred_with_password, cannot acquire GSSAPI credential with " "explicit username and password.") b_password = to_bytes(self.password, errors='surrogate_or_strict') cred = gssapi.raw.acquire_cred_with_password(username, b_password, usage='initiate').creds else: cred = gssapi.Credentials(name=username, usage='initiate') # Get the peer certificate for the channel binding token if possible (HTTPS). A bug on macOS causes the # authentication to fail when the CBT is present. Just skip that platform. cbt = None cert = getpeercert(fp, True) if cert and platform.system() != 'Darwin': cert_hash = get_channel_binding_cert_hash(cert) if cert_hash: cbt = gssapi.raw.ChannelBindings(application_data=b"tls-server-end-point:" + cert_hash) # TODO: We could add another option that is set to include the port in the SPN if desired in the future. 
target = gssapi.Name("HTTP@%s" % parsed.hostname, gssapi.NameType.hostbased_service) self._context = gssapi.SecurityContext(usage="initiate", name=target, creds=cred, channel_bindings=cbt) resp = None while not self._context.complete: out_token = self._context.step(in_token) if not out_token: break auth_header = '%s %s' % (auth_protocol, to_native(base64.b64encode(out_token))) req.add_unredirected_header('Authorization', auth_header) resp = self.parent.open(req) # The response could contain a token that the client uses to validate the server auth_header = self.get_auth_value(resp.headers) if not auth_header: break in_token = auth_header[1] return resp except ImportError: GSSAPI_IMP_ERR = traceback.format_exc() HTTPGSSAPIAuthHandler = None # type: types.ModuleType | None # type: ignore[no-redef] PEM_CERT_RE = re.compile( r'^-----BEGIN CERTIFICATE-----\n.+?-----END CERTIFICATE-----$', flags=re.M | re.S ) # # Exceptions # class ConnectionError(Exception): """Failed to connect to the server""" pass class ProxyError(ConnectionError): """Failure to connect because of a proxy""" pass class SSLValidationError(ConnectionError): """Failure to connect due to SSL validation failing No longer used, but kept for backwards compatibility """ pass class NoSSLError(SSLValidationError): """Needed to connect to an HTTPS url but no ssl library available to verify the certificate No longer used, but kept for backwards compatibility """ pass class MissingModuleError(Exception): """Failed to import 3rd party module required by the caller""" def __init__(self, message, import_traceback, module=None): super().__init__(message) self.import_traceback = import_traceback self.module = module UnixHTTPSHandler = None UnixHTTPSConnection = None if HAS_SSL: @contextmanager def unix_socket_patch_httpconnection_connect(): """Monkey patch ``http.client.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect`` so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the correct behavior of creating self.sock for the unix socket """ _connect = http.client.HTTPConnection.connect http.client.HTTPConnection.connect = UnixHTTPConnection.connect yield http.client.HTTPConnection.connect = _connect class UnixHTTPSConnection(http.client.HTTPSConnection): # type: ignore[no-redef] def __init__(self, unix_socket): self._unix_socket = unix_socket def connect(self): # This method exists simply to ensure we monkeypatch # http.client.HTTPConnection.connect to call UnixHTTPConnection.connect with unix_socket_patch_httpconnection_connect(): # Disable pylint check for the super() call. 
It complains about UnixHTTPSConnection # being a NoneType because of the initial definition above, but it won't actually # be a NoneType when this code runs super().connect() def __call__(self, *args, **kwargs): super().__init__(*args, **kwargs) return self class UnixHTTPSHandler(urllib.request.HTTPSHandler): # type: ignore[no-redef] def __init__(self, unix_socket, **kwargs): super().__init__(**kwargs) self._unix_socket = unix_socket def https_open(self, req): kwargs = {} try: # deprecated: description='deprecated check_hostname' python_version='3.12' kwargs['check_hostname'] = self._check_hostname except AttributeError: pass return self.do_open( UnixHTTPSConnection(self._unix_socket), req, context=self._context, **kwargs ) class UnixHTTPConnection(http.client.HTTPConnection): """Handles http requests to a unix socket file""" def __init__(self, unix_socket): self._unix_socket = unix_socket def connect(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: self.sock.connect(self._unix_socket) except OSError as e: raise OSError('Invalid Socket File (%s): %s' % (self._unix_socket, e)) if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: self.sock.settimeout(self.timeout) def __call__(self, *args, **kwargs): super().__init__(*args, **kwargs) return self class UnixHTTPHandler(urllib.request.HTTPHandler): """Handler for Unix urls""" def __init__(self, unix_socket, **kwargs): super().__init__(**kwargs) self._unix_socket = unix_socket def http_open(self, req): return self.do_open(UnixHTTPConnection(self._unix_socket), req) class ParseResultDottedDict(dict): """ A dict that acts similarly to the ParseResult named tuple from urllib """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__dict__ = self def as_list(self): """ Generate a list from this dict, that looks like the ParseResult named tuple """ return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')] def generic_urlparse(parts): """ Returns a dictionary of url parts as parsed by urlparse, but accounts for the fact that older versions of that library do not support named attributes (ie. .netloc) This method isn't of much use any longer, but is kept in a minimal state for backwards compat. """ result = ParseResultDottedDict(parts._asdict()) result.update({ 'username': parts.username, 'password': parts.password, 'hostname': parts.hostname, 'port': parts.port, }) return result def extract_pem_certs(data): for match in PEM_CERT_RE.finditer(data): yield match.group(0) def get_response_filename(response): if filename := response.headers.get_param('filename', header='content-disposition'): filename = os.path.basename(filename) else: url = response.geturl() path = urlparse(url)[2] filename = os.path.basename(path.rstrip('/')) or None if filename: filename = unquote(filename) return filename def parse_content_type(response): get_type = response.headers.get_content_type get_param = response.headers.get_param content_type = (get_type() or 'application/octet-stream').split(',')[0] main_type, sub_type = content_type.split('/') charset = (get_param('charset') or 'utf-8').split(',')[0] return content_type, main_type, sub_type, charset class GzipDecodedReader(GzipFile): """A file-like object to decode a response encoded with the gzip method, as described in RFC 1952. 
Largely copied from ``xmlrpclib``/``xmlrpc.client`` """ def __init__(self, fp): if not HAS_GZIP: raise MissingModuleError(self.missing_gzip_error(), import_traceback=GZIP_IMP_ERR) self._io = fp super().__init__(mode='rb', fileobj=self._io) def close(self): try: gzip.GzipFile.close(self) finally: self._io.close() @staticmethod def missing_gzip_error(): return missing_required_lib( 'gzip', reason='to decompress gzip encoded responses. ' 'Set "decompress" to False, to prevent attempting auto decompression' ) class HTTPRedirectHandler(urllib.request.HTTPRedirectHandler): """This is an implementation of a RedirectHandler to match the functionality provided by httplib2. It will utilize the value of ``follow_redirects`` to determine how redirects should be handled in urllib. """ def __init__(self, follow_redirects=None): self.follow_redirects = follow_redirects def __call__(self, *args, **kwargs): super().__init__(*args, **kwargs) return self try: urllib.request.HTTPRedirectHandler.http_error_308 # type: ignore[attr-defined] except AttributeError: # deprecated: description='urllib http 308 support' python_version='3.11' http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302 def redirect_request(self, req, fp, code, msg, headers, newurl): follow_redirects = self.follow_redirects # Preserve urllib2 compatibility if follow_redirects in ('urllib2', 'urllib'): return urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl) # Handle disabled redirects elif follow_redirects in ('no', 'none', False): raise urllib.error.HTTPError(newurl, code, msg, headers, fp) method = req.get_method() # Handle non-redirect HTTP status or invalid follow_redirects if follow_redirects in ('all', 'yes', True): if code < 300 or code >= 400: raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) elif follow_redirects == 'safe': if code < 300 or code >= 400 or method not in ('GET', 'HEAD'): raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) else: raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) data = req.data origin_req_host = req.origin_req_host # Be conciliant with URIs containing a space newurl = newurl.replace(' ', '%20') # Support redirect with payload and original headers if code in (307, 308): # Preserve payload and headers req_headers = req.headers else: # Do not preserve payload and filter headers data = None req_headers = {k: v for k, v in req.headers.items() if k.lower() not in ("content-length", "content-type", "transfer-encoding")} # http://tools.ietf.org/html/rfc7231#section-6.4.4 if code == 303 and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if code == 302 and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. if code == 301 and method == 'POST': method = 'GET' return urllib.request.Request( newurl, data=data, headers=req_headers, origin_req_host=origin_req_host, unverifiable=True, method=method.upper(), ) def make_context(cafile=None, cadata=None, capath=None, ciphers=None, validate_certs=True, client_cert=None, client_key=None): if ciphers is None: ciphers = [] if not is_sequence(ciphers): raise TypeError('Ciphers must be a list. Got %s.' 
% ciphers.__class__.__name__) context = ssl.create_default_context(cafile=cafile) if not validate_certs: context.options |= ssl.OP_NO_SSLv3 context.check_hostname = False context.verify_mode = ssl.CERT_NONE # If cafile is passed, we are only using that for verification, # don't add additional ca certs if validate_certs and not cafile: if not cadata: cadata = bytearray() cadata.extend(get_ca_certs(capath=capath)[0]) if cadata: context.load_verify_locations(cadata=cadata) if ciphers: context.set_ciphers(':'.join(map(to_native, ciphers))) if client_cert: # TLS 1.3 needs this to be set to True to allow post handshake cert # authentication. This functionality was added in Python 3.8 and was # backported to 3.6.7, and 3.7.1 so needs a check for now. if hasattr(context, "post_handshake_auth"): context.post_handshake_auth = True context.load_cert_chain(client_cert, keyfile=client_key) return context def get_ca_certs(cafile=None, capath=None): # tries to find a valid CA cert in one of the # standard locations for the current distribution # Using a dict, instead of a set for order, the value is meaningless and will be None # Not directly using a bytearray to avoid duplicates with fast lookup cadata = {} # If cafile is passed, we are only using that for verification, # don't add additional ca certs if cafile: paths_checked = [cafile] with open(to_bytes(cafile, errors='surrogate_or_strict'), 'r', errors='surrogateescape') as f: for pem in extract_pem_certs(f.read()): b_der = ssl.PEM_cert_to_DER_cert(pem) cadata[b_der] = None return bytearray().join(cadata), paths_checked default_verify_paths = ssl.get_default_verify_paths() default_capath = default_verify_paths.capath paths_checked = {default_capath or default_verify_paths.cafile} if capath: paths_checked.add(capath) system = to_text(platform.system(), errors='surrogate_or_strict') # build a list of paths to check for .crt/.pem files # based on the platform type if system == u'Linux': paths_checked.add('/etc/pki/ca-trust/extracted/pem') paths_checked.add('/etc/pki/tls/certs') paths_checked.add('/usr/share/ca-certificates/cacert.org') elif system == u'FreeBSD': paths_checked.add('/usr/local/share/certs') elif system == u'OpenBSD': paths_checked.add('/etc/ssl') elif system == u'NetBSD': paths_checked.add('/etc/openssl/certs') elif system == u'SunOS': paths_checked.add('/opt/local/etc/openssl/certs') elif system == u'AIX': paths_checked.add('/var/ssl/certs') paths_checked.add('/opt/freeware/etc/ssl/certs') elif system == u'Darwin': paths_checked.add('/usr/local/etc/openssl') # fall back to a user-deployed cert in a standard # location if the OS platform one is not available paths_checked.add('/etc/ansible') # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use # in the ssl check to speed up the test for path in paths_checked: if not path or path == default_capath or not os.path.isdir(path): continue for f in os.listdir(path): full_path = os.path.join(path, f) if os.path.isfile(full_path) and os.path.splitext(f)[1] in {'.pem', '.cer', '.crt'}: try: with open(full_path, 'r', errors='surrogateescape') as cert_file: cert = cert_file.read() try: for pem in extract_pem_certs(cert): b_der = ssl.PEM_cert_to_DER_cert(pem) cadata[b_der] = None except Exception: continue except (OSError, IOError): pass # paths_checked isn't used any more, but is kept just for ease of debugging return bytearray().join(cadata), list(paths_checked) def getpeercert(response, binary_form=False): """ Attempt to get the peer certificate of 
the response from urlopen. """ socket = response.fp.raw._sock try: return socket.getpeercert(binary_form) except AttributeError: pass # Not HTTPS def get_channel_binding_cert_hash(certificate_der): """ Gets the channel binding app data for a TLS connection using the peer cert. """ if not HAS_CRYPTOGRAPHY: return # Logic documented in RFC 5929 section 4 https://tools.ietf.org/html/rfc5929#section-4 cert = x509.load_der_x509_certificate(certificate_der, default_backend()) hash_algorithm = None try: hash_algorithm = cert.signature_hash_algorithm except UnsupportedAlgorithm: pass # If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256. if not hash_algorithm or hash_algorithm.name in ('md5', 'sha1'): hash_algorithm = hashes.SHA256() digest = hashes.Hash(hash_algorithm, default_backend()) digest.update(certificate_der) return digest.finalize() def rfc2822_date_string(timetuple, zone='-0000'): """Accepts a timetuple and optional zone which defaults to ``-0000`` and returns a date string as specified by RFC 2822, e.g.: Fri, 09 Nov 2001 01:08:47 -0000 Copied from email.utils.formatdate and modified for separate use """ return '%s, %02d %s %04d %02d:%02d:%02d %s' % ( ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]], timetuple[2], ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1], timetuple[0], timetuple[3], timetuple[4], timetuple[5], zone) def _configure_auth(url, url_username, url_password, use_gssapi, force_basic_auth, use_netrc): headers = {} handlers = [] parsed = urlparse(url) if parsed.scheme == 'ftp': return url, headers, handlers username = url_username password = url_password if username: netloc = parsed.netloc elif '@' in parsed.netloc: credentials, netloc = parsed.netloc.split('@', 1) if ':' in credentials: username, password = credentials.split(':', 1) else: username = credentials password = '' username = unquote(username) password = unquote(password) # reconstruct url without credentials url = urlunparse(parsed._replace(netloc=netloc)) if use_gssapi: if HTTPGSSAPIAuthHandler: # type: ignore[truthy-function] handlers.append(HTTPGSSAPIAuthHandler(username, password)) else: imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True', url='https://pypi.org/project/gssapi/') raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR) elif username and not force_basic_auth: passman = urllib.request.HTTPPasswordMgrWithDefaultRealm() # this creates a password manager passman.add_password(None, netloc, username, password) # because we have put None at the start it will always # use this username/password combination for urls # for which `theurl` is a super-url authhandler = urllib.request.HTTPBasicAuthHandler(passman) digest_authhandler = urllib.request.HTTPDigestAuthHandler(passman) # create the AuthHandler handlers.append(authhandler) handlers.append(digest_authhandler) elif username and force_basic_auth: headers["Authorization"] = basic_auth_header(username, password) elif use_netrc: try: rc = netrc.netrc(os.environ.get('NETRC')) login = rc.authenticators(parsed.hostname) except IOError: login = None if login: username, dummy, password = login if username and password: headers["Authorization"] = basic_auth_header(username, password) return url, headers, handlers class Request: def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False, 
follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None, ca_path=None, unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True, context=None): """This class works somewhat similarly to the ``Session`` class of from requests by defining a cookiejar that can be used across requests as well as cascaded defaults that can apply to repeated requests For documentation of params, see ``Request.open`` >>> from ansible.module_utils.urls import Request >>> r = Request() >>> r.open('GET', 'http://httpbin.org/cookies/set?k1=v1').read() '{\n "cookies": {\n "k1": "v1"\n }\n}\n' >>> r = Request(url_username='user', url_password='passwd') >>> r.open('GET', 'http://httpbin.org/basic-auth/user/passwd').read() '{\n "authenticated": true, \n "user": "user"\n}\n' >>> r = Request(headers=dict(foo='bar')) >>> r.open('GET', 'http://httpbin.org/get', headers=dict(baz='qux')).read() """ self.headers = headers or {} if not isinstance(self.headers, dict): raise ValueError("headers must be a dict: %r" % self.headers) self.use_proxy = use_proxy self.force = force self.timeout = timeout self.validate_certs = validate_certs self.url_username = url_username self.url_password = url_password self.http_agent = http_agent self.force_basic_auth = force_basic_auth self.follow_redirects = follow_redirects self.client_cert = client_cert self.client_key = client_key self.unix_socket = unix_socket self.ca_path = ca_path self.unredirected_headers = unredirected_headers self.decompress = decompress self.ciphers = ciphers self.use_netrc = use_netrc self.context = context if isinstance(cookies, cookiejar.CookieJar): self.cookies = cookies else: self.cookies = cookiejar.CookieJar() def _fallback(self, value, fallback): if value is None: return fallback return value def open(self, method, url, data=None, headers=None, use_proxy=None, force=None, last_mod_time=None, timeout=None, validate_certs=None, url_username=None, url_password=None, http_agent=None, force_basic_auth=None, follow_redirects=None, client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None, decompress=None, ciphers=None, use_netrc=None, context=None): """ Sends a request via HTTP(S) or FTP using urllib (Python3) Does not require the module environment Returns :class:`HTTPResponse` object. 
        :arg method: method for the request
        :arg url: URL to request
        :kwarg data: (optional) bytes, or file-like object to send
            in the body of the request
        :kwarg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :kwarg use_proxy: (optional) Boolean of whether or not to use proxy
        :kwarg force: (optional) Boolean of whether or not to set `cache-control: no-cache` header
        :kwarg last_mod_time: (optional) Datetime object to use when setting If-Modified-Since header
        :kwarg timeout: (optional) How long to wait for the server to send
            data before giving up, as a float
        :kwarg validate_certs: (optional) Boolean that controls whether we verify
            the server's TLS certificate
        :kwarg url_username: (optional) String of the user to use when authenticating
        :kwarg url_password: (optional) String of the password to use when authenticating
        :kwarg http_agent: (optional) String of the User-Agent to use in the request
        :kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request
        :kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are
            followed, see HTTPRedirectHandler for more information
        :kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client authentication.
            This file can also include the key as well, and if the key is included, client_key is not required
        :kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client
            authentication. If client_cert contains both the certificate and key, this option is not required
        :kwarg cookies: (optional) CookieJar object to send with the request
        :kwarg use_gssapi: (optional) Use a GSSAPI handler for the request
        :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
            connection to the provided url
        :kwarg ca_path: (optional) String of file system path to CA cert bundle to use
        :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
        :kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
        :kwarg ciphers: (optional) List of ciphers to use
        :kwarg use_netrc: (optional) Boolean determining whether to use credentials from ~/.netrc file
        :kwarg context: (optional) ssl.Context object for SSL validation. When provided, all other SSL related
            arguments are ignored. See make_context.
        :returns: HTTPResponse.
Added in Ansible 2.9 """ if headers is None: headers = {} elif not isinstance(headers, dict): raise ValueError("headers must be a dict") headers = dict(self.headers, **headers) use_proxy = self._fallback(use_proxy, self.use_proxy) force = self._fallback(force, self.force) timeout = self._fallback(timeout, self.timeout) validate_certs = self._fallback(validate_certs, self.validate_certs) url_username = self._fallback(url_username, self.url_username) url_password = self._fallback(url_password, self.url_password) http_agent = self._fallback(http_agent, self.http_agent) force_basic_auth = self._fallback(force_basic_auth, self.force_basic_auth) follow_redirects = self._fallback(follow_redirects, self.follow_redirects) client_cert = self._fallback(client_cert, self.client_cert) client_key = self._fallback(client_key, self.client_key) cookies = self._fallback(cookies, self.cookies) unix_socket = self._fallback(unix_socket, self.unix_socket) ca_path = self._fallback(ca_path, self.ca_path) unredirected_headers = self._fallback(unredirected_headers, self.unredirected_headers) decompress = self._fallback(decompress, self.decompress) ciphers = self._fallback(ciphers, self.ciphers) use_netrc = self._fallback(use_netrc, self.use_netrc) context = self._fallback(context, self.context) handlers = [] if unix_socket: handlers.append(UnixHTTPHandler(unix_socket)) url, auth_headers, auth_handlers = _configure_auth(url, url_username, url_password, use_gssapi, force_basic_auth, use_netrc) headers.update(auth_headers) handlers.extend(auth_handlers) if not use_proxy: proxyhandler = urllib.request.ProxyHandler({}) handlers.append(proxyhandler) if not context: context = make_context( cafile=ca_path, ciphers=ciphers, validate_certs=validate_certs, client_cert=client_cert, client_key=client_key, ) if unix_socket: ssl_handler = UnixHTTPSHandler(unix_socket=unix_socket, context=context) else: ssl_handler = urllib.request.HTTPSHandler(context=context) handlers.append(ssl_handler) handlers.append(HTTPRedirectHandler(follow_redirects)) # add some nicer cookie handling if cookies is not None: handlers.append(urllib.request.HTTPCookieProcessor(cookies)) opener = urllib.request.build_opener(*handlers) urllib.request.install_opener(opener) data = to_bytes(data, nonstring='passthru') request = urllib.request.Request(url, data=data, method=method.upper()) # add the custom agent header, to help prevent issues # with sites that block the default urllib agent string if http_agent: request.add_header('User-agent', http_agent) # Cache control # Either we directly force a cache refresh if force: request.add_header('cache-control', 'no-cache') # or we do it if the original is more recent than our copy elif last_mod_time: tstamp = rfc2822_date_string(last_mod_time.timetuple(), 'GMT') request.add_header('If-Modified-Since', tstamp) # user defined headers now, which may override things we've set above unredirected_headers = [h.lower() for h in (unredirected_headers or [])] for header in headers: if header.lower() in unredirected_headers: request.add_unredirected_header(header, headers[header]) else: request.add_header(header, headers[header]) r = urllib.request.urlopen(request, None, timeout) if decompress and r.headers.get('content-encoding', '').lower() == 'gzip': fp = GzipDecodedReader(r.fp) r.fp = fp # Content-Length does not match gzip decoded length # Prevent ``r.read`` from stopping at Content-Length r.length = None return r def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`HTTPResponse` object. 
:arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('GET', url, **kwargs) def options(self, url, **kwargs): r"""Sends an OPTIONS request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('OPTIONS', url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('HEAD', url, **kwargs) def post(self, url, data=None, **kwargs): r"""Sends a POST request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('POST', url, data=data, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('DELETE', url, **kwargs) def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False, follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True): """ Sends a request via HTTP(S) or FTP using urllib (Python 3). Does not require the module environment. """ method = method or ('POST' if data else 'GET') return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=url_username, url_password=url_password, http_agent=http_agent, force_basic_auth=force_basic_auth, follow_redirects=follow_redirects, client_cert=client_cert, client_key=client_key, cookies=cookies, use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc) def prepare_multipart(fields): """Takes a mapping, and prepares a multipart/form-data body :arg fields: Mapping :returns: tuple of (content_type, body) where ``content_type`` is the ``multipart/form-data`` ``Content-Type`` header including ``boundary`` and ``body`` is the prepared bytestring body Payload content from a file will be base64 encoded and will include the appropriate ``Content-Transfer-Encoding`` and ``Content-Type`` headers.
Example: { "file1": { "filename": "/bin/true", "mime_type": "application/octet-stream" }, "file2": { "content": "text based file content", "filename": "fake.txt", "mime_type": "text/plain", }, "text_form_field": "value" } """ if not isinstance(fields, Mapping): raise TypeError( 'Mapping is required, cannot be type %s' % fields.__class__.__name__ ) m = email.mime.multipart.MIMEMultipart('form-data') for field, value in sorted(fields.items()): if isinstance(value, str): main_type = 'text' sub_type = 'plain' content = value filename = None elif isinstance(value, Mapping): filename = value.get('filename') content = value.get('content') if not any((filename, content)): raise ValueError('at least one of filename or content must be provided') mime = value.get('mime_type') if not mime: try: mime = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream' except Exception: mime = 'application/octet-stream' main_type, sep, sub_type = mime.partition('/') else: raise TypeError( 'value must be a string, or mapping, cannot be type %s' % value.__class__.__name__ ) if not content and filename: with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f: part = email.mime.application.MIMEApplication(f.read()) del part['Content-Type'] part.add_header('Content-Type', '%s/%s' % (main_type, sub_type)) else: part = email.mime.nonmultipart.MIMENonMultipart(main_type, sub_type) part.set_payload(to_bytes(content)) part.add_header('Content-Disposition', 'form-data') del part['MIME-Version'] part.set_param( 'name', field, header='Content-Disposition' ) if filename: part.set_param( 'filename', to_native(os.path.basename(filename)), header='Content-Disposition' ) m.attach(part) # Ensure headers are not split over multiple lines # The HTTP policy also uses CRLF by default b_data = m.as_bytes(policy=email.policy.HTTP) del m headers, sep, b_content = b_data.partition(b'\r\n\r\n') del b_data parser = email.parser.BytesHeaderParser().parsebytes return ( parser(headers)['content-type'], # Message converts to native strings b_content ) # # Module-related functions # def basic_auth_header(username, password): """Takes a username and password and returns a byte string suitable for use as the value of an Authorization header for basic auth. """ if password is None: password = '' return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict')) def url_argument_spec(): """ Creates an argument spec that can be used with any module that will be requesting content via urllib/urllib2 """ return dict( url=dict(type='str'), force=dict(type='bool', default=False), http_agent=dict(type='str', default='ansible-httpget'), use_proxy=dict(type='bool', default=True), validate_certs=dict(type='bool', default=True), url_username=dict(type='str'), url_password=dict(type='str', no_log=True), force_basic_auth=dict(type='bool', default=False), client_cert=dict(type='path'), client_key=dict(type='path'), use_gssapi=dict(type='bool', default=False), ) def fetch_url(module, url, data=None, headers=None, method=None, use_proxy=None, force=False, last_mod_time=None, timeout=10, use_gssapi=False, unix_socket=None, ca_path=None, cookies=None, unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True): """Sends a request via HTTP(S) or FTP (needs the module as parameter) :arg module: The AnsibleModule (used to get username, password, etc.). :arg url: The url to use. :kwarg data: The data to be sent (in case of POST/PUT).
:kwarg headers: A dict with the request headers. :kwarg method: "POST", "PUT", etc. :kwarg use_proxy: (optional) whether or not to use proxy (Default: True) :kwarg boolean force: If True: Do not get a cached copy (Default: False) :kwarg last_mod_time: Default: None :kwarg int timeout: Default: 10 :kwarg boolean use_gssapi: Default: False :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing connection to the provided url :kwarg ca_path: (optional) String of file system path to CA cert bundle to use :kwarg cookies: (optional) CookieJar object to send with the request :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request :kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses :kwarg ciphers: (optional) List of ciphers to use :kwarg boolean use_netrc: (optional) If False: Ignores login and password in ~/.netrc file (Default: True) :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data. The **info** contains the 'status' and other metadata. When an HttpError (status >= 400) occurs, ``info['body']`` contains the error response data:: Example:: data={...} resp, info = fetch_url(module, "http://example.com", data=module.jsonify(data), headers={'Content-type': 'application/json'}, method="POST") status_code = info["status"] body = resp.read() if status_code >= 400: body = info['body'] """ if not HAS_GZIP: module.fail_json(msg=GzipDecodedReader.missing_gzip_error()) # ensure we use proper tempdir old_tempdir = tempfile.tempdir tempfile.tempdir = module.tmpdir # Get validate_certs from the module params validate_certs = module.params.get('validate_certs', True) if use_proxy is None: use_proxy = module.params.get('use_proxy', True) username = module.params.get('url_username', '') password = module.params.get('url_password', '') http_agent = module.params.get('http_agent', get_user_agent()) force_basic_auth = module.params.get('force_basic_auth', '') follow_redirects = module.params.get('follow_redirects', 'urllib2') client_cert = module.params.get('client_cert') client_key = module.params.get('client_key') use_gssapi = module.params.get('use_gssapi', use_gssapi) if not isinstance(cookies, cookiejar.CookieJar): cookies = cookiejar.CookieJar() r = None info = dict(url=url, status=-1) try: r = open_url(url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=username, url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth, follow_redirects=follow_redirects, client_cert=client_cert, client_key=client_key, cookies=cookies, use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc) # Lowercase keys, to conform to py2 behavior info.update({k.lower(): v for k, v in r.info().items()}) # Don't be lossy, append header values for duplicate headers temp_headers = {} for name, value in r.headers.items(): # The same as above, lower case keys to match py2 behavior, and create more consistent results name = name.lower() if name in temp_headers: temp_headers[name] = ', '.join((temp_headers[name], value)) else: temp_headers[name] = value info.update(temp_headers) # parse the cookies into a nice dictionary cookie_list = [] cookie_dict = {} # Python sorts cookies in order of most specific (i.e.
longest) path first. See ``CookieJar._cookie_attrs`` # Cookies with the same path are reversed from response order. # This code makes no assumptions about that, and accepts the order given by python for cookie in cookies: cookie_dict[cookie.name] = cookie.value cookie_list.append((cookie.name, cookie.value)) info['cookies_string'] = '; '.join('%s=%s' % c for c in cookie_list) info['cookies'] = cookie_dict # finally update the result with a message about the fetch info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code)) except (ConnectionError, ValueError) as e: module.fail_json(msg=to_native(e), **info) except MissingModuleError as e: module.fail_json(msg=to_text(e), exception=e.import_traceback) except urllib.error.HTTPError as e: r = e try: if e.fp is None: # Certain HTTPError objects may not have the ability to call ``.read()`` on Python 3 # This is not handled gracefully in Python 3, and instead an exception is raised from # tempfile, due to ``urllib.response.addinfourl`` not being initialized raise AttributeError body = e.read() except AttributeError: body = '' else: e.close() # Try to add exception info to the output but don't fail if we can't try: # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable info.update({k.lower(): v for k, v in e.info().items()}) except Exception: pass info.update({'msg': to_native(e), 'body': body, 'status': e.code}) except urllib.error.URLError as e: code = int(getattr(e, 'code', -1)) info.update(dict(msg="Request failed: %s" % to_native(e), status=code)) except socket.error as e: info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1)) except http.client.BadStatusLine as e: info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1)) except Exception as e: info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1), exception=traceback.format_exc()) finally: tempfile.tempdir = old_tempdir return r, info def _suffixes(name): """A list of the final component's suffixes, if any.""" if name.endswith('.'): return [] name = name.lstrip('.') return ['.' + s for s in name.split('.')[1:]] def _split_multiext(name, min=3, max=4, count=2): """Split a multi-part extension from a file name. Returns '([name minus extension], extension)'. Define the valid extension length (including the '.') with 'min' and 'max', 'count' sets the number of extensions, counting from the end, to evaluate. Evaluation stops on the first file extension that is outside the min and max range. If no valid extensions are found, the original ``name`` is returned and ``extension`` is empty. :arg name: File name or path. :kwarg min: Minimum length of a valid file extension. :kwarg max: Maximum length of a valid file extension. :kwarg count: Number of suffixes from the end to evaluate. """ extension = '' for i, sfx in enumerate(reversed(_suffixes(name))): if i >= count: break if min <= len(sfx) <= max: extension = '%s%s' % (sfx, extension) # rstrip() strips a set of characters, not the suffix itself, so slice the suffix off instead name = name[:len(name) - len(sfx)] else: # Stop on the first invalid extension break return name, extension def fetch_file(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, unredirected_headers=None, decompress=True, ciphers=None): """Download and save a file via HTTP(S) or FTP (needs the module as parameter). This is basically a wrapper around fetch_url().
:arg module: The AnsibleModule (used to get username, password, etc.). :arg url: The url to use. :kwarg data: The data to be sent (in case of POST/PUT). :kwarg headers: A dict with the request headers. :kwarg method: "POST", "PUT", etc. :kwarg boolean use_proxy: Default: True :kwarg boolean force: If True: Do not get a cached copy (Default: False) :kwarg last_mod_time: Default: None :kwarg int timeout: Default: 10 :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request :kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses :kwarg ciphers: (optional) List of ciphers to use :returns: A string, the path to the downloaded file. """ # download file bufsize = 65536 parts = urlparse(url) file_prefix, file_ext = _split_multiext(os.path.basename(parts.path), count=2) fetch_temp_file = tempfile.NamedTemporaryFile(dir=module.tmpdir, prefix=file_prefix, suffix=file_ext, delete=False) module.add_cleanup_file(fetch_temp_file.name) try: rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout, unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers) if not rsp or (rsp.code and rsp.code >= 400): module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg'])) data = rsp.read(bufsize) while data: fetch_temp_file.write(data) data = rsp.read(bufsize) fetch_temp_file.close() except Exception as e: module.fail_json(msg="Failure downloading %s, %s" % (url, to_native(e))) return fetch_temp_file.name def get_user_agent(): """Returns a user agent used by open_url""" return u"ansible-httpget"
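# --- Editor's illustrative sketch (not part of the upstream module) ---
# A minimal demo of the helpers defined above, assuming network access;
# 'https://example.com' is a placeholder URL. Guarded so that importing
# this file stays side-effect free.
if __name__ == '__main__':
    # One-off request: open_url() builds a throwaway Request() per call.
    resp = open_url('https://example.com', headers={'Accept': 'text/html'}, timeout=5)
    print(resp.code, len(resp.read()))

    # Reusable client: kwargs omitted per call fall back to the constructor defaults.
    client = Request(timeout=5, validate_certs=True)
    print(client.get('https://example.com').headers.get('content-type'))

    # _split_multiext() keeps compound archive extensions together.
    print(_split_multiext('collection.tar.gz'))  # -> ('collection', '.tar.gz')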
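# --- Editor's illustrative sketch (not part of the upstream module) ---
# prepare_multipart() pairs naturally with open_url(): the returned
# Content-Type header (which carries the boundary) goes into the request
# headers and the encoded bytestring becomes the payload. The endpoint and
# field names below are placeholders.
if __name__ == '__main__':
    content_type, body = prepare_multipart({
        'text_form_field': 'value',
        'file1': {
            'content': 'text based file content',
            'filename': 'fake.txt',
            'mime_type': 'text/plain',
        },
    })
    resp = open_url(
        'https://example.com/upload',
        data=body,
        headers={'Content-Type': content_type},
        method='POST',
    )
    print(resp.code)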
54,666
Python
.py
1,119
39.660411
146
0.640707
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,210
pycompat24.py
ansible_ansible/lib/ansible/module_utils/pycompat24.py
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com> # Copyright (c) 2015, Marius Gedminas # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import sys from ansible.module_utils.common.warnings import deprecate def get_exception(): """Get the current exception. This code needs to work on Python 2.4 through 3.x, so we cannot use "except Exception, e:" (SyntaxError on Python 3.x) nor "except Exception as e:" (SyntaxError on Python 2.4-2.5). Instead we must use :: except Exception: e = get_exception() """ deprecate( msg='The `ansible.module_utils.pycompat24.get_exception` ' 'function is deprecated.', version='2.19', ) return sys.exc_info()[1] def __getattr__(importable_name): """Inject import-time deprecation warning for ``literal_eval()``.""" if importable_name == 'literal_eval': deprecate( msg=f'The `ansible.module_utils.pycompat24.' f'{importable_name}` function is deprecated.', version='2.19', ) from ast import literal_eval return literal_eval raise AttributeError( f'cannot import name {importable_name!r} from {__file__!s}', ) __all__ = ('get_exception', 'literal_eval') # pylint: disable=undefined-all-variable
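# --- Editor's illustrative sketch (not part of the upstream file) ---
# Shows the deprecated helper next to its modern replacements; purely
# illustrative and guarded so importing the file stays side-effect free.
if __name__ == '__main__':
    # Legacy pattern (triggers the deprecation warning above):
    try:
        raise ValueError('boom')
    except Exception:
        exc = get_exception()
        print('via get_exception():', exc)

    # Modern replacements: `except ... as e` and ast.literal_eval directly.
    try:
        raise ValueError('boom')
    except ValueError as e:
        print('via except-as:', e)
    from ast import literal_eval as _literal_eval
    print(_literal_eval('[1, 2, 3]'))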
2,953
Python
.py
60
45
92
0.728819
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,211
basic.py
ansible_ansible/lib/ansible/module_utils/basic.py
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016 # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import annotations import json import sys import typing as t # Used for determining if the system is running a new enough python version # and should only restrict on our documented minimum versions _PY_MIN = (3, 8) if sys.version_info < _PY_MIN: print(json.dumps(dict( failed=True, msg=f"ansible-core requires a minimum of Python version {'.'.join(map(str, _PY_MIN))}. Current version: {''.join(sys.version.splitlines())}", ))) sys.exit(1) # Ansible modules can be written in any language. # The functions available here can be used to do many common tasks, # to simplify development of Python modules. import __main__ import atexit import errno import grp import fcntl import locale import os import pwd import platform import re import select import selectors import shlex import shutil import stat import subprocess import tempfile import time import traceback from functools import reduce try: import syslog HAS_SYSLOG = True except ImportError: HAS_SYSLOG = False try: from systemd import journal, daemon as systemd_daemon # Makes sure that systemd.journal has method sendv() # Double check that journal has method sendv (some packages don't) # check if the system is running under systemd has_journal = hasattr(journal, 'sendv') and systemd_daemon.booted() except (ImportError, AttributeError): # AttributeError would be caused from use of .booted() if wrong systemd has_journal = False HAVE_SELINUX = False try: from ansible.module_utils.compat import selinux HAVE_SELINUX = True except ImportError: pass # Python2 & 3 way to get NoneType NoneType = type(None) from ._text import to_native, to_bytes, to_text from ansible.module_utils.common.text.converters import ( jsonify, container_to_bytes as json_dict_unicode_to_bytes, container_to_text as json_dict_bytes_to_unicode, ) from ansible.module_utils.common.arg_spec import ModuleArgumentSpecValidator from ansible.module_utils.common.text.formatters import ( lenient_lowercase, bytes_to_human, human_to_bytes, SIZE_RANGES, ) import hashlib def _get_available_hash_algorithms(): """Return a dictionary of available hash function names and their associated function.""" algorithms = {} for algorithm_name in hashlib.algorithms_available: algorithm_func = getattr(hashlib, algorithm_name, None) if algorithm_func: try: # Make sure the algorithm is actually available for use. # Not all algorithms listed as available are actually usable. # For example, md5 is not available in FIPS mode. 
algorithm_func() except Exception: pass else: algorithms[algorithm_name] = algorithm_func return algorithms AVAILABLE_HASH_ALGORITHMS = _get_available_hash_algorithms() from ansible.module_utils.six.moves.collections_abc import ( KeysView, Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet, ) from ansible.module_utils.common.locale import get_best_parsable_locale from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.file import ( _PERM_BITS as PERM_BITS, _DEFAULT_PERM as DEFAULT_PERM, is_executable, format_attributes, get_flags_from_attributes, FILE_ATTRIBUTES, S_IXANY, S_IRWU_RWG_RWO, ) from ansible.module_utils.common.sys_info import ( get_distribution, get_distribution_version, get_platform_subclass, ) from ansible.module_utils.common.parameters import ( env_fallback, remove_values, sanitize_keys, DEFAULT_TYPE_VALIDATORS, PASS_VARS, PASS_BOOLS, ) from ansible.module_utils.errors import AnsibleFallbackNotFound, AnsibleValidationErrorMultiple, UnsupportedError from ansible.module_utils.common.validation import ( check_missing_parameters, safe_eval, ) from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean from ansible.module_utils.common.warnings import ( deprecate, get_deprecation_messages, get_warning_messages, warn, ) # Note: When getting Sequence from collections, it matches with strings. If # this matters, make sure to check for strings before checking for sequencetype SEQUENCETYPE = frozenset, KeysView, Sequence PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I) imap = map # Internal global holding passed in params. This is consulted in case # multiple AnsibleModules are created. Otherwise each AnsibleModule would # attempt to read from stdin. Other code should not use this directly as it # is an internal implementation detail _ANSIBLE_ARGS = None FILE_COMMON_ARGUMENTS = dict( # These are things we want. About setting metadata (mode, ownership, permissions in general) on # created files (these are used by set_fs_attributes_if_different and included in # load_file_common_arguments) mode=dict(type='raw'), owner=dict(type='str'), group=dict(type='str'), seuser=dict(type='str'), serole=dict(type='str'), selevel=dict(type='str'), setype=dict(type='str'), attributes=dict(type='str', aliases=['attr']), unsafe_writes=dict(type='bool', default=False, fallback=(env_fallback, ['ANSIBLE_UNSAFE_WRITES'])), # should be available to any module using atomic_move ) PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?') # Used for parsing symbolic file perms MODE_OPERATOR_RE = re.compile(r'[+=-]') USERS_RE = re.compile(r'^[ugo]+$') PERMS_RE = re.compile(r'^[rwxXstugo]*$') # # Deprecated functions # def get_platform(): """ **Deprecated** Use :py:func:`platform.system` directly. :returns: Name of the platform the module is running on in a native string Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is the result of calling :py:func:`platform.system`. 
""" return platform.system() # End deprecated functions # # Compat shims # def load_platform_subclass(cls, *args, **kwargs): """**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead""" platform_cls = get_platform_subclass(cls) return super(cls, platform_cls).__new__(platform_cls) def get_all_subclasses(cls): """**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead""" return list(_get_all_subclasses(cls)) # End compat shims def heuristic_log_sanitize(data, no_log_values=None): """ Remove strings that look like passwords from log messages """ # Currently filters: # user:pass@foo/whatever and http://username:pass@wherever/foo # This code has false positives and consumes parts of logs that are # not passwds # begin: start of a passwd containing string # end: end of a passwd containing string # sep: char between user and passwd # prev_begin: where in the overall string to start a search for # a passwd # sep_search_end: where in the string to end a search for the sep data = to_native(data) output = [] begin = len(data) prev_begin = begin sep = 1 while sep: # Find the potential end of a passwd try: end = data.rindex('@', 0, begin) except ValueError: # No passwd in the rest of the data output.insert(0, data[0:begin]) break # Search for the beginning of a passwd sep = None sep_search_end = end while not sep: # URL-style username+password try: begin = data.rindex('://', 0, sep_search_end) except ValueError: # No url style in the data, check for ssh style in the # rest of the string begin = 0 # Search for separator try: sep = data.index(':', begin + 3, end) except ValueError: # No separator; choices: if begin == 0: # Searched the whole string so there's no password # here. Return the remaining data output.insert(0, data[0:prev_begin]) break # Search for a different beginning of the password field. sep_search_end = begin continue if sep: # Password was found; remove it. output.insert(0, data[end:prev_begin]) output.insert(0, '********') output.insert(0, data[begin:sep + 1]) prev_begin = begin output = ''.join(output) if no_log_values: output = remove_values(output, no_log_values) return output def _load_params(): """ read the modules parameters and store them globally. This function may be needed for certain very dynamic custom modules which want to process the parameters that are being handed the module. Since this is so closely tied to the implementation of modules we cannot guarantee API stability for it (it may change between versions) however we will try not to break it gratuitously. It is certainly more future-proof to call this function and consume its outputs than to implement the logic inside it as a copy in your own code. """ global _ANSIBLE_ARGS if _ANSIBLE_ARGS is not None: buffer = _ANSIBLE_ARGS else: # debug overrides to read args from file or cmdline # Avoid tracebacks when locale is non-utf8 # We control the args and we pass them as utf8 if len(sys.argv) > 1: if os.path.isfile(sys.argv[1]): with open(sys.argv[1], 'rb') as fd: buffer = fd.read() else: buffer = sys.argv[1].encode('utf-8', errors='surrogateescape') # default case, read from stdin else: buffer = sys.stdin.buffer.read() _ANSIBLE_ARGS = buffer try: params = json.loads(buffer.decode('utf-8')) except ValueError: # This helper is used too early for fail_json to work. print('\n{"msg": "Error: Module unable to decode stdin/parameters as valid JSON. 
Unable to parse what parameters were passed", "failed": true}') sys.exit(1) try: return params['ANSIBLE_MODULE_ARGS'] except KeyError: # This helper does not have access to fail_json so we have to print # json output on our own. print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in JSON data from stdin. Unable to figure out what parameters were passed", ' '"failed": true}') sys.exit(1) def missing_required_lib(library, reason=None, url=None): hostname = platform.node() msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable) if reason: msg += " This is required %s." % reason if url: msg += " See %s for more info." % url msg += (" Please read the module documentation and install it in the appropriate location." " If the required library is installed, but Ansible is using the wrong Python interpreter," " please consult the documentation on ansible_python_interpreter") return msg class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False, supports_check_mode=False, required_if=None, required_by=None): """ Common code for quickly building an ansible module in Python (although you can write modules with anything that can return JSON). See :ref:`developing_modules_general` for a general introduction and :ref:`developing_program_flow_modules` for more detailed explanation. """ self._name = os.path.basename(__file__) # initialize name until we can parse from options self.argument_spec = argument_spec self.supports_check_mode = supports_check_mode self.check_mode = False self.bypass_checks = bypass_checks self.no_log = no_log self.mutually_exclusive = mutually_exclusive self.required_together = required_together self.required_one_of = required_one_of self.required_if = required_if self.required_by = required_by self.cleanup_files = [] self._debug = False self._diff = False self._socket_path = None self._shell = None self._syslog_facility = 'LOG_USER' self._verbosity = 0 # May be used to set modifications to the environment for any # run_command invocation self.run_command_environ_update = {} self._clean = {} self._string_conversion_action = '' self.aliases = {} self._legal_inputs = [] self._options_context = list() self._tmpdir = None if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.items(): if k not in self.argument_spec: self.argument_spec[k] = v # Save parameter values that should never be logged self.no_log_values = set() # check the locale as set by the current environment, and reset to # a known valid (LANG=C) if it's an invalid/unavailable locale self._check_locale() self._load_params() self._set_internal_properties() self.validator = ModuleArgumentSpecValidator(self.argument_spec, self.mutually_exclusive, self.required_together, self.required_one_of, self.required_if, self.required_by, ) self.validation_result = self.validator.validate(self.params) self.params.update(self.validation_result.validated_parameters) self.no_log_values.update(self.validation_result._no_log_values) self.aliases.update(self.validation_result._aliases) try: error = self.validation_result.errors[0] if isinstance(error, UnsupportedError) and self._ignore_unknown_opts: error = None except IndexError: error = None # Fail for validation errors, even in check mode if error: msg = self.validation_result.errors.msg if isinstance(error, UnsupportedError): msg = "Unsupported parameters for ({name}) {kind}: 
{msg}".format(name=self._name, kind='module', msg=msg) self.fail_json(msg=msg) if self.check_mode and not self.supports_check_mode: self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name) # This is for backwards compatibility only. self._CHECK_ARGUMENT_TYPES_DISPATCHER = DEFAULT_TYPE_VALIDATORS if not self.no_log: self._log_invocation() # selinux state caching self._selinux_enabled = None self._selinux_mls_enabled = None self._selinux_initial_context = None # finally, make sure we're in a logical working dir self._set_cwd() @property def tmpdir(self): # if _ansible_tmpdir was not set and we have a remote_tmp, # the module needs to create it and clean it up once finished. # otherwise we create our own module tmp dir from the system defaults if self._tmpdir is None: basedir = None if self._remote_tmp is not None: basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp)) if basedir is not None and not os.path.exists(basedir): try: os.makedirs(basedir, mode=0o700) except (OSError, IOError) as e: self.warn("Unable to use %s as temporary directory, " "failing back to system: %s" % (basedir, to_native(e))) basedir = None else: self.warn("Module remote_tmp %s did not exist and was " "created with a mode of 0700, this may cause" " issues when running as another user. To " "avoid this, create the remote_tmp dir with " "the correct permissions manually" % basedir) basefile = "ansible-moduletmp-%s-" % time.time() try: tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir) except (OSError, IOError) as e: self.fail_json( msg="Failed to create remote module tmp path at dir %s " "with prefix %s: %s" % (basedir, basefile, to_native(e)) ) if not self._keep_remote_files: atexit.register(shutil.rmtree, tmpdir) self._tmpdir = tmpdir return self._tmpdir def warn(self, warning): warn(warning) self.log('[WARNING] %s' % warning) def deprecate(self, msg, version=None, date=None, collection_name=None): if version is not None and date is not None: raise AssertionError("implementation error -- version and date must not both be set") deprecate(msg, version=version, date=date, collection_name=collection_name) # For compatibility, we accept that neither version nor date is set, # and treat that the same as if version would not have been set if date is not None: self.log('[DEPRECATION WARNING] %s %s' % (msg, date)) else: self.log('[DEPRECATION WARNING] %s %s' % (msg, version)) def load_file_common_arguments(self, params, path=None): """ many modules deal with files, this encapsulates common options that the file module accepts such that it is directly available to all modules and they can share code. Allows to overwrite the path/dest module argument by providing path. 
""" if path is None: path = params.get('path', params.get('dest', None)) if path is None: return {} else: path = os.path.expanduser(os.path.expandvars(path)) b_path = to_bytes(path, errors='surrogate_or_strict') # if the path is a symlink, and we're following links, get # the target of the link instead for testing if params.get('follow', False) and os.path.islink(b_path): b_path = os.path.realpath(b_path) path = to_native(b_path) mode = params.get('mode', None) owner = params.get('owner', None) group = params.get('group', None) # selinux related options seuser = params.get('seuser', None) serole = params.get('serole', None) setype = params.get('setype', None) selevel = params.get('selevel', None) secontext = [seuser, serole, setype] if self.selinux_mls_enabled(): secontext.append(selevel) default_secontext = self.selinux_default_context(path) for i in range(len(default_secontext)): if i is not None and secontext[i] == '_default': secontext[i] = default_secontext[i] attributes = params.get('attributes', None) return dict( path=path, mode=mode, owner=owner, group=group, seuser=seuser, serole=serole, setype=setype, selevel=selevel, secontext=secontext, attributes=attributes, ) # Detect whether using selinux that is MLS-aware. # While this means you can set the level/range with # selinux.lsetfilecon(), it may or may not mean that you # will get the selevel as part of the context returned # by selinux.lgetfilecon(). def selinux_mls_enabled(self): if self._selinux_mls_enabled is None: self._selinux_mls_enabled = HAVE_SELINUX and selinux.is_selinux_mls_enabled() == 1 return self._selinux_mls_enabled def selinux_enabled(self): if self._selinux_enabled is None: self._selinux_enabled = HAVE_SELINUX and selinux.is_selinux_enabled() == 1 return self._selinux_enabled # Determine whether we need a placeholder for selevel/mls def selinux_initial_context(self): if self._selinux_initial_context is None: self._selinux_initial_context = [None, None, None] if self.selinux_mls_enabled(): self._selinux_initial_context.append(None) return self._selinux_initial_context # If selinux fails to find a default, return an array of None def selinux_default_context(self, path, mode=0): context = self.selinux_initial_context() if not self.selinux_enabled(): return context try: ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode) except OSError: return context if ret[0] == -1: return context # Limit split to 4 because the selevel, the last in the list, # may contain ':' characters context = ret[1].split(':', 3) return context def selinux_context(self, path): context = self.selinux_initial_context() if not self.selinux_enabled(): return context try: ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict')) except OSError as e: if e.errno == errno.ENOENT: self.fail_json(path=path, msg='path %s does not exist' % path) else: self.fail_json(path=path, msg='failed to retrieve selinux context') if ret[0] == -1: return context # Limit split to 4 because the selevel, the last in the list, # may contain ':' characters context = ret[1].split(':', 3) return context def user_and_group(self, path, expand=True): b_path = to_bytes(path, errors='surrogate_or_strict') if expand: b_path = os.path.expanduser(os.path.expandvars(b_path)) st = os.lstat(b_path) uid = st.st_uid gid = st.st_gid return (uid, gid) def find_mount_point(self, path): """ Takes a path and returns its mount point :param path: a string type with a filesystem path :returns: the path to the mount point as a text type """ b_path = 
os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict')) while not os.path.ismount(b_path): b_path = os.path.dirname(b_path) return to_text(b_path, errors='surrogate_or_strict') def is_special_selinux_path(self, path): """ Returns a tuple containing (True, selinux_context) if the given path is on a NFS or other 'special' fs mount point, otherwise the return will be (False, None). """ try: with open('/proc/mounts', 'r') as f: mount_data = f.readlines() except Exception: return (False, None) path_mount_point = self.find_mount_point(path) for line in mount_data: (device, mount_point, fstype, options, rest) = line.split(' ', 4) if to_bytes(path_mount_point) == to_bytes(mount_point): for fs in self._selinux_special_fs: if fs in fstype: special_context = self.selinux_context(path_mount_point) return (True, special_context) return (False, None) def set_default_selinux_context(self, path, changed): if not self.selinux_enabled(): return changed context = self.selinux_default_context(path) return self.set_context_if_different(path, context, False) def set_context_if_different(self, path, context, changed, diff=None): if not self.selinux_enabled(): return changed if self.check_file_absent_if_check_mode(path): return True cur_context = self.selinux_context(path) new_context = list(cur_context) # Iterate over the current context instead of the # argument context, which may have selevel. (is_special_se, sp_context) = self.is_special_selinux_path(path) if is_special_se: new_context = sp_context else: for i in range(len(cur_context)): if len(context) > i: if context[i] is not None and context[i] != cur_context[i]: new_context[i] = context[i] elif context[i] is None: new_context[i] = cur_context[i] if cur_context != new_context: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['secontext'] = cur_context if 'after' not in diff: diff['after'] = {} diff['after']['secontext'] = new_context try: if self.check_mode: return True rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context)) except OSError as e: self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e), new_context=new_context, cur_context=cur_context, input_was=context) if rc != 0: self.fail_json(path=path, msg='set selinux context failed') changed = True return changed def set_owner_if_different(self, path, owner, changed, diff=None, expand=True): if owner is None: return changed b_path = to_bytes(path, errors='surrogate_or_strict') if expand: b_path = os.path.expanduser(os.path.expandvars(b_path)) if self.check_file_absent_if_check_mode(b_path): return True orig_uid, orig_gid = self.user_and_group(b_path, expand) try: uid = int(owner) except ValueError: try: uid = pwd.getpwnam(owner).pw_uid except KeyError: path = to_text(b_path) self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner) if orig_uid != uid: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['owner'] = orig_uid if 'after' not in diff: diff['after'] = {} diff['after']['owner'] = uid if self.check_mode: return True try: os.lchown(b_path, uid, -1) except (IOError, OSError) as e: path = to_text(b_path) self.fail_json(path=path, msg='chown failed: %s' % (to_text(e))) changed = True return changed def set_group_if_different(self, path, group, changed, diff=None, expand=True): if group is None: return changed b_path = to_bytes(path, errors='surrogate_or_strict') if expand: b_path = os.path.expanduser(os.path.expandvars(b_path)) if 
self.check_file_absent_if_check_mode(b_path): return True orig_uid, orig_gid = self.user_and_group(b_path, expand) try: gid = int(group) except ValueError: try: gid = grp.getgrnam(group).gr_gid except KeyError: path = to_text(b_path) self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group) if orig_gid != gid: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['group'] = orig_gid if 'after' not in diff: diff['after'] = {} diff['after']['group'] = gid if self.check_mode: return True try: os.lchown(b_path, -1, gid) except OSError: path = to_text(b_path) self.fail_json(path=path, msg='chgrp failed') changed = True return changed def set_mode_if_different(self, path, mode, changed, diff=None, expand=True): if mode is None: return changed b_path = to_bytes(path, errors='surrogate_or_strict') if expand: b_path = os.path.expanduser(os.path.expandvars(b_path)) if self.check_file_absent_if_check_mode(b_path): return True path_stat = os.lstat(b_path) if not isinstance(mode, int): try: mode = int(mode, 8) except Exception: try: mode = self._symbolic_mode_to_octal(path_stat, mode) except Exception as e: path = to_text(b_path) self.fail_json(path=path, msg="mode must be in octal or symbolic form", details=to_native(e)) if mode != stat.S_IMODE(mode): # prevent mode from having extra info or being invalid long number path = to_text(b_path) self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode) prev_mode = stat.S_IMODE(path_stat.st_mode) if prev_mode != mode: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['mode'] = '0%03o' % prev_mode if 'after' not in diff: diff['after'] = {} diff['after']['mode'] = '0%03o' % mode if self.check_mode: return True # FIXME: comparison against string above will cause this to be executed # every time try: if hasattr(os, 'lchmod'): os.lchmod(b_path, mode) else: if not os.path.islink(b_path): os.chmod(b_path, mode) else: # Attempt to set the perms of the symlink but be # careful not to change the perms of the underlying # file while trying underlying_stat = os.stat(b_path) os.chmod(b_path, mode) new_underlying_stat = os.stat(b_path) if underlying_stat.st_mode != new_underlying_stat.st_mode: os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode)) except OSError as e: if os.path.islink(b_path) and e.errno in ( errno.EACCES, # can't access symlink in sticky directory (stat) errno.EPERM, # can't set mode on symbolic links (chmod) errno.EROFS, # can't set mode on read-only filesystem ): pass elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links pass else: raise except Exception as e: path = to_text(b_path) self.fail_json(path=path, msg='chmod failed', details=to_native(e), exception=traceback.format_exc()) path_stat = os.lstat(b_path) new_mode = stat.S_IMODE(path_stat.st_mode) if new_mode != prev_mode: changed = True return changed def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True): if attributes is None: return changed b_path = to_bytes(path, errors='surrogate_or_strict') if expand: b_path = os.path.expanduser(os.path.expandvars(b_path)) if self.check_file_absent_if_check_mode(b_path): return True existing = self.get_file_attributes(b_path, include_version=False) attr_mod = '=' if attributes.startswith(('-', '+')): attr_mod = attributes[0] attributes = attributes[1:] if attributes and (existing.get('attr_flags', '') != attributes or attr_mod == '-'): attrcmd = 
self.get_bin_path('chattr') if attrcmd: attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path] changed = True if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['attributes'] = existing.get('attr_flags') if 'after' not in diff: diff['after'] = {} diff['after']['attributes'] = '%s%s' % (attr_mod, attributes) if not self.check_mode: try: rc, out, err = self.run_command(attrcmd) if rc != 0 or err: raise Exception("Error while setting attributes: %s" % (out + err)) except Exception as e: self.fail_json(path=to_text(b_path), msg='chattr failed', details=to_native(e), exception=traceback.format_exc()) return changed def get_file_attributes(self, path, include_version=True): output = {} attrcmd = self.get_bin_path('lsattr', False) if attrcmd: flags = '-vd' if include_version else '-d' attrcmd = [attrcmd, flags, path] try: rc, out, err = self.run_command(attrcmd) if rc == 0: res = out.split() attr_flags_idx = 0 if include_version: attr_flags_idx = 1 output['version'] = res[0].strip() output['attr_flags'] = res[attr_flags_idx].replace('-', '').strip() output['attributes'] = format_attributes(output['attr_flags']) except Exception: pass return output @classmethod def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode): """ This enables symbolic chmod string parsing as stated in the chmod man-page This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X" """ new_mode = stat.S_IMODE(path_stat.st_mode) # Now parse all symbolic modes for mode in symbolic_mode.split(','): # Per single mode. This always contains a '+', '-' or '=' # Split it on that permlist = MODE_OPERATOR_RE.split(mode) # And find all the operators opers = MODE_OPERATOR_RE.findall(mode) # The user(s) the mode applies to is the first element in the # 'permlist' list. Take that and remove it from the list. # An empty user or 'a' means 'all'. users = permlist.pop(0) use_umask = (users == '') if users == 'a' or users == '': users = 'ugo' # Check if there are illegal characters in the user list # They can end up in 'users' because they are not split if not USERS_RE.match(users): raise ValueError("bad symbolic permission for mode: %s" % mode) # Now we have two lists of equal length, one contains the requested # permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist): # Check if there are illegal characters in the permissions if not PERMS_RE.match(perms): raise ValueError("bad symbolic permission for mode: %s" % mode) for user in users: mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask, new_mode) new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode) return new_mode @staticmethod def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode): if operator == '=': if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX # mask out u, g, or o permissions from current_mode and apply new permissions inverse_mask = mask ^ PERM_BITS new_mode = (current_mode & inverse_mask) | mode_to_apply elif operator == '+': new_mode = current_mode | mode_to_apply elif operator == '-': new_mode = current_mode - (current_mode & mode_to_apply) return new_mode @staticmethod def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask, prev_mode=None): if prev_mode is None: prev_mode = stat.S_IMODE(path_stat.st_mode) is_directory = stat.S_ISDIR(path_stat.st_mode) has_x_permissions = (prev_mode & S_IXANY) > 0 apply_X_permission = is_directory or has_x_permissions # Get the umask, if the 'user' part is empty, the effect is as if (a) were # given, but bits that are set in the umask are not affected. # We also need the "reversed umask" for masking umask = os.umask(0) os.umask(umask) rev_umask = umask ^ PERM_BITS # Permission bits constants documented at: # https://docs.python.org/3/library/stat.html#stat.S_ISUID if apply_X_permission: X_perms = { 'u': {'X': stat.S_IXUSR}, 'g': {'X': stat.S_IXGRP}, 'o': {'X': stat.S_IXOTH}, } else: X_perms = { 'u': {'X': 0}, 'g': {'X': 0}, 'o': {'X': 0}, } user_perms_to_modes = { 'u': { 'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR, 'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR, 'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR, 's': stat.S_ISUID, 't': 0, 'u': prev_mode & stat.S_IRWXU, 'g': (prev_mode & stat.S_IRWXG) << 3, 'o': (prev_mode & stat.S_IRWXO) << 6}, 'g': { 'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP, 'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP, 'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP, 's': stat.S_ISGID, 't': 0, 'u': (prev_mode & stat.S_IRWXU) >> 3, 'g': prev_mode & stat.S_IRWXG, 'o': (prev_mode & stat.S_IRWXO) << 3}, 'o': { 'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH, 'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH, 'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH, 's': 0, 't': stat.S_ISVTX, 'u': (prev_mode & stat.S_IRWXU) >> 6, 'g': (prev_mode & stat.S_IRWXG) >> 3, 'o': prev_mode & stat.S_IRWXO}, } # Insert X_perms into user_perms_to_modes for key, value in X_perms.items(): user_perms_to_modes[key].update(value) def or_reduce(mode, perm): return mode | user_perms_to_modes[user][perm] return reduce(or_reduce, perms, 0) def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True): # set modes owners and context as needed changed = self.set_context_if_different( file_args['path'], file_args['secontext'], changed, diff ) changed = self.set_owner_if_different( file_args['path'], file_args['owner'], changed, diff, expand ) changed = self.set_group_if_different( file_args['path'], file_args['group'], changed, diff, expand ) changed = 
self.set_mode_if_different( file_args['path'], file_args['mode'], changed, diff, expand ) changed = self.set_attributes_if_different( file_args['path'], file_args['attributes'], changed, diff, expand ) return changed def check_file_absent_if_check_mode(self, file_path): return self.check_mode and not os.path.exists(file_path) def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True): return self.set_fs_attributes_if_different(file_args, changed, diff, expand) def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True): return self.set_fs_attributes_if_different(file_args, changed, diff, expand) def add_path_info(self, kwargs): """ for results that are files, supplement the info about the file in the return path with stats about the file path. """ path = kwargs.get('path', kwargs.get('dest', None)) if path is None: return kwargs b_path = to_bytes(path, errors='surrogate_or_strict') if os.path.exists(b_path): (uid, gid) = self.user_and_group(path) kwargs['uid'] = uid kwargs['gid'] = gid try: user = pwd.getpwuid(uid)[0] except KeyError: user = str(uid) try: group = grp.getgrgid(gid)[0] except KeyError: group = str(gid) kwargs['owner'] = user kwargs['group'] = group st = os.lstat(b_path) kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE]) # secontext not yet supported if os.path.islink(b_path): kwargs['state'] = 'link' elif os.path.isdir(b_path): kwargs['state'] = 'directory' elif os.stat(b_path).st_nlink > 1: kwargs['state'] = 'hard' else: kwargs['state'] = 'file' if self.selinux_enabled(): kwargs['secontext'] = ':'.join(self.selinux_context(path)) kwargs['size'] = st[stat.ST_SIZE] return kwargs def _check_locale(self): """ Uses the locale module to test the currently set locale (per the LANG and LC_CTYPE environment settings) """ try: # setting the locale to '' uses the default locale # as it would be returned by locale.getdefaultlocale() locale.setlocale(locale.LC_ALL, '') except locale.Error: # fallback to the 'best' locale, per the function # final fallback is 'C', which may cause unicode issues # but is preferable to simply failing on unknown locale best_locale = get_best_parsable_locale(self) # need to set several since many tools choose to ignore documented precedence and scope locale.setlocale(locale.LC_ALL, best_locale) os.environ['LANG'] = best_locale os.environ['LC_ALL'] = best_locale os.environ['LC_MESSAGES'] = best_locale except Exception as e: self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % to_native(e), exception=traceback.format_exc()) def _set_internal_properties(self, argument_spec=None, module_parameters=None): if argument_spec is None: argument_spec = self.argument_spec if module_parameters is None: module_parameters = self.params for k in PASS_VARS: # handle setting internal properties from internal ansible vars param_key = '_ansible_%s' % k if param_key in module_parameters: if k in PASS_BOOLS: setattr(self, PASS_VARS[k][0], self.boolean(module_parameters[param_key])) else: setattr(self, PASS_VARS[k][0], module_parameters[param_key]) # clean up internal top level params: if param_key in self.params: del self.params[param_key] else: # use defaults if not already set if not hasattr(self, PASS_VARS[k][0]): setattr(self, PASS_VARS[k][0], PASS_VARS[k][1]) def safe_eval(self, value, locals=None, include_exceptions=False): # deprecated: description='no longer used in the codebase' core_version='2.21' return safe_eval(value, locals, include_exceptions) def 
_load_params(self): """ read the input and set the params attribute. This method is for backwards compatibility. The guts of the function were moved out in 2.1 so that custom modules could read the parameters. """ # debug overrides to read args from file or cmdline self.params = _load_params() def _log_to_syslog(self, msg): if HAS_SYSLOG: try: module = 'ansible-%s' % self._name facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER) syslog.openlog(str(module), 0, facility) syslog.syslog(syslog.LOG_INFO, msg) except (TypeError, ValueError) as e: self.fail_json( msg='Failed to log to syslog (%s). To proceed anyway, ' 'disable syslog logging by setting no_target_syslog ' 'to True in your Ansible config.' % to_native(e), exception=traceback.format_exc(), msg_to_log=msg, ) def debug(self, msg): if self._debug: self.log('[debug] %s' % msg) def log(self, msg, log_args=None): if not self.no_log: if log_args is None: log_args = dict() module = 'ansible-%s' % self._name if isinstance(module, bytes): module = module.decode('utf-8', 'replace') # 6655 - allow for accented characters if not isinstance(msg, (bytes, str)): raise TypeError("msg should be a string (got %s)" % type(msg)) # We want journal to always take text type # syslog takes bytes on py2, text type on py3 if isinstance(msg, bytes): journal_msg = msg.decode('utf-8', 'replace') else: # TODO: surrogateescape is a danger here on Py3 journal_msg = msg if self._target_log_info: journal_msg = ' '.join([self._target_log_info, journal_msg]) # ensure we clean up secrets! journal_msg = remove_values(journal_msg, self.no_log_values) if has_journal: journal_args = [("MODULE", os.path.basename(__file__))] for arg in log_args: name, value = (arg.upper(), str(log_args[arg])) if name in ( 'PRIORITY', 'MESSAGE', 'MESSAGE_ID', 'CODE_FILE', 'CODE_LINE', 'CODE_FUNC', 'SYSLOG_FACILITY', 'SYSLOG_IDENTIFIER', 'SYSLOG_PID', ): name = "_%s" % name journal_args.append((name, value)) try: if HAS_SYSLOG: # If syslog_facility specified, it needs to convert # from the facility name to the facility code, and # set it as SYSLOG_FACILITY argument of journal.send() facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER) >> 3 journal.send(MESSAGE=u"%s %s" % (module, journal_msg), SYSLOG_FACILITY=facility, **dict(journal_args)) else: journal.send(MESSAGE=u"%s %s" % (module, journal_msg), **dict(journal_args)) except IOError: # fall back to syslog since logging to journal failed self._log_to_syslog(journal_msg) else: self._log_to_syslog(journal_msg) def _log_invocation(self): """ log that ansible ran the module """ # TODO: generalize a separate log function and make log_invocation use it # Sanitize possible password argument when logging. 
        log_args = dict()

        for param in self.params:
            canon = self.aliases.get(param, param)
            arg_opts = self.argument_spec.get(canon, {})
            no_log = arg_opts.get('no_log', None)

            # try to proactively capture password/passphrase fields
            if no_log is None and PASSWORD_MATCH.search(param):
                log_args[param] = 'NOT_LOGGING_PASSWORD'
                self.warn('Module did not set no_log for %s' % param)
            elif self.boolean(no_log):
                log_args[param] = 'NOT_LOGGING_PARAMETER'
            else:
                param_val = self.params[param]
                if not isinstance(param_val, (str, bytes)):
                    param_val = str(param_val)
                elif isinstance(param_val, str):
                    param_val = param_val.encode('utf-8')
                log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)

        msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
        if msg:
            msg = 'Invoked with %s' % ' '.join(msg)
        else:
            msg = 'Invoked'

        self.log(msg, log_args=log_args)

    def _set_cwd(self):
        try:
            cwd = os.getcwd()
            if not os.access(cwd, os.F_OK | os.R_OK):
                raise Exception()
            return cwd
        except Exception:
            # we don't have access to the cwd, probably because of sudo.
            # Try and move to a neutral location to prevent errors
            for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
                try:
                    if os.access(cwd, os.F_OK | os.R_OK):
                        os.chdir(cwd)
                        return cwd
                except Exception:
                    pass
        # we won't error here, as it may *not* be a problem,
        # and we don't want to break modules unnecessarily
        return None

    def get_bin_path(self, arg, required=False, opt_dirs=None):
        """
        Find system executable in PATH.

        :param arg: The executable to find.
        :param required: if the executable is not found and required is ``True``, fail_json
        :param opt_dirs: optional list of directories to search in addition to ``PATH``
        :returns: if found return full path; otherwise return None
        :raises: SystemExit: if arg is not found and required=True (via fail_json)
        """
        bin_path = None
        try:
            bin_path = get_bin_path(arg=arg, opt_dirs=opt_dirs)
        except ValueError as e:
            if required:
                self.fail_json(msg=to_text(e))

        return bin_path

    def boolean(self, arg):
        """Convert the argument to a boolean"""
        if arg is None:
            return arg

        try:
            return boolean(arg)
        except TypeError as e:
            self.fail_json(msg=to_native(e))

    def jsonify(self, data):
        try:
            return jsonify(data)
        except UnicodeError as e:
            self.fail_json(msg=to_text(e))

    def from_json(self, data):
        return json.loads(data)

    def add_cleanup_file(self, path):
        if path not in self.cleanup_files:
            self.cleanup_files.append(path)

    def do_cleanup_files(self):
        for path in self.cleanup_files:
            self.cleanup(path)

    def _return_formatted(self, kwargs):

        self.add_path_info(kwargs)

        if 'invocation' not in kwargs:
            kwargs['invocation'] = {'module_args': self.params}

        if 'warnings' in kwargs:
            if isinstance(kwargs['warnings'], list):
                for w in kwargs['warnings']:
                    self.warn(w)
            else:
                self.warn(kwargs['warnings'])

        warnings = get_warning_messages()
        if warnings:
            kwargs['warnings'] = warnings

        if 'deprecations' in kwargs:
            if isinstance(kwargs['deprecations'], list):
                for d in kwargs['deprecations']:
                    if isinstance(d, SEQUENCETYPE) and len(d) == 2:
                        self.deprecate(d[0], version=d[1])
                    elif isinstance(d, Mapping):
                        self.deprecate(d['msg'], version=d.get('version'), date=d.get('date'),
                                       collection_name=d.get('collection_name'))
                    else:
                        self.deprecate(d)  # pylint: disable=ansible-deprecated-no-version
            else:
                self.deprecate(kwargs['deprecations'])  # pylint: disable=ansible-deprecated-no-version

        deprecations = get_deprecation_messages()
        if deprecations:
            kwargs['deprecations'] = deprecations

        # preserve bools/none from no_log
preserved = {k: v for k, v in kwargs.items() if v is None or isinstance(v, bool)} # strip no_log collisions kwargs = remove_values(kwargs, self.no_log_values) # return preserved kwargs.update(preserved) print('\n%s' % self.jsonify(kwargs)) def exit_json(self, **kwargs) -> t.NoReturn: """ return from the module, without error """ self.do_cleanup_files() self._return_formatted(kwargs) sys.exit(0) def fail_json(self, msg, **kwargs) -> t.NoReturn: """ return from the module, with an error message """ kwargs['failed'] = True kwargs['msg'] = msg # Add traceback if debug or high verbosity and it is missing # NOTE: Badly named as exception, it really always has been a traceback if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3): kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2])) self.do_cleanup_files() self._return_formatted(kwargs) sys.exit(1) def fail_on_missing_params(self, required_params=None): if not required_params: return try: check_missing_parameters(self.params, required_params) except TypeError as e: self.fail_json(msg=to_native(e)) def digest_from_file(self, filename, algorithm): """ Return hex digest of local file for a digest_method specified by name, or None if file is not present. """ b_filename = to_bytes(filename, errors='surrogate_or_strict') if not os.path.exists(b_filename): return None if os.path.isdir(b_filename): self.fail_json(msg="attempted to take checksum of directory: %s" % filename) # preserve old behaviour where the third parameter was a hash algorithm object if hasattr(algorithm, 'hexdigest'): digest_method = algorithm else: try: digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]() except KeyError: self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" % (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS))) blocksize = 64 * 1024 infile = open(os.path.realpath(b_filename), 'rb') block = infile.read(blocksize) while block: digest_method.update(block) block = infile.read(blocksize) infile.close() return digest_method.hexdigest() def md5(self, filename): """ Return MD5 hex digest of local file using digest_from_file(). Do not use this function unless you have no other choice for: 1) Optional backwards compatibility 2) Compatibility with a third party protocol This function will not work on systems complying with FIPS-140-2. Most uses of this function can use the module.sha1 function instead. """ if 'md5' not in AVAILABLE_HASH_ALGORITHMS: raise ValueError('MD5 not available. Possibly running in FIPS mode') return self.digest_from_file(filename, 'md5') def sha1(self, filename): """ Return SHA1 hex digest of local file using digest_from_file(). """ return self.digest_from_file(filename, 'sha1') def sha256(self, filename): """ Return SHA-256 hex digest of local file using digest_from_file(). 
""" return self.digest_from_file(filename, 'sha256') def backup_local(self, fn): """make a date-marked backup of the specified file, return True or False on success or failure""" backupdest = '' if os.path.exists(fn): # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~ ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time())) backupdest = '%s.%s.%s' % (fn, os.getpid(), ext) try: self.preserved_copy(fn, backupdest) except (shutil.Error, IOError) as e: self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e))) return backupdest def cleanup(self, tmpfile): if os.path.exists(tmpfile): try: os.unlink(tmpfile) except OSError as e: sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e))) def preserved_copy(self, src, dest): """Copy a file with preserved ownership, permissions and context""" # shutil.copy2(src, dst) # Similar to shutil.copy(), but metadata is copied as well - in fact, # this is just shutil.copy() followed by copystat(). This is similar # to the Unix command cp -p. # shutil.copystat(src, dst) # Copy the permission bits, last access time, last modification time, # and flags from src to dst. The file contents, owner, and group are # unaffected. src and dst are path names given as strings. shutil.copy2(src, dest) # Set the context if self.selinux_enabled(): context = self.selinux_context(src) self.set_context_if_different(dest, context, False) # chown it try: dest_stat = os.stat(src) tmp_stat = os.stat(dest) if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): os.chown(dest, dest_stat.st_uid, dest_stat.st_gid) except OSError as e: if e.errno != errno.EPERM: raise # Set the attributes current_attribs = self.get_file_attributes(src, include_version=False) current_attribs = current_attribs.get('attr_flags', '') self.set_attributes_if_different(dest, current_attribs, True) def atomic_move(self, src, dest, unsafe_writes=False, keep_dest_attrs=True): """atomically move src to dest, copying attributes from dest, returns true on success it uses os.rename to ensure this as it is an atomic operation, rest of the function is to work around limitations, corner cases and ensure selinux context is saved if possible""" context = None dest_stat = None b_src = to_bytes(src, errors='surrogate_or_strict') b_dest = to_bytes(dest, errors='surrogate_or_strict') if os.path.exists(b_dest) and keep_dest_attrs: try: dest_stat = os.stat(b_dest) os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid) shutil.copystat(b_dest, b_src) os.utime(b_src, times=(time.time(), time.time())) except OSError as e: if e.errno != errno.EPERM: raise if self.selinux_enabled(): context = self.selinux_context(dest) else: if self.selinux_enabled(): context = self.selinux_default_context(dest) creating = not os.path.exists(b_dest) try: # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic. os.rename(b_src, b_dest) except (IOError, OSError) as e: if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]: # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc()) else: # Use bytes here. 
                # In the shippable CI, this fails with
                # a UnicodeError with surrogateescape'd strings for an unknown
                # reason (doesn't happen in a local Ubuntu16.04 VM)
                b_dest_dir = os.path.dirname(b_dest)
                b_suffix = os.path.basename(b_dest)
                error_msg = None
                tmp_dest_name = None
                try:
                    tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp', dir=b_dest_dir, suffix=b_suffix)
                except (OSError, IOError) as e:
                    error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
                finally:
                    if error_msg:
                        if unsafe_writes:
                            self._unsafe_writes(b_src, b_dest)
                        else:
                            self.fail_json(msg=error_msg, exception=traceback.format_exc())

                if tmp_dest_name:
                    b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')

                    try:
                        try:
                            # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
                            os.close(tmp_dest_fd)
                            # leaves tmp file behind when sudo and not root
                            try:
                                shutil.move(b_src, b_tmp_dest_name, copy_function=shutil.copy if keep_dest_attrs else shutil.copy2)
                            except OSError:
                                # cleanup will happen by 'rm' of tmpdir
                                # copy2 will preserve some metadata
                                if keep_dest_attrs:
                                    shutil.copy(b_src, b_tmp_dest_name)
                                else:
                                    shutil.copy2(b_src, b_tmp_dest_name)

                            if self.selinux_enabled():
                                self.set_context_if_different(b_tmp_dest_name, context, False)
                            try:
                                tmp_stat = os.stat(b_tmp_dest_name)
                                if keep_dest_attrs:
                                    if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
                                        os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
                                    os.utime(b_tmp_dest_name, times=(time.time(), time.time()))
                            except OSError as e:
                                if e.errno != errno.EPERM:
                                    raise
                            try:
                                os.rename(b_tmp_dest_name, b_dest)
                            except (shutil.Error, OSError, IOError) as e:
                                if unsafe_writes and e.errno == errno.EBUSY:
                                    self._unsafe_writes(b_tmp_dest_name, b_dest)
                                else:
                                    self.fail_json(msg='Unable to make %s into %s, failed final rename from %s: %s' %
                                                       (src, dest, b_tmp_dest_name, to_native(e)),
                                                   exception=traceback.format_exc())
                        except (shutil.Error, OSError, IOError) as e:
                            if unsafe_writes:
                                self._unsafe_writes(b_src, b_dest)
                            else:
                                self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
                                               exception=traceback.format_exc())
                    finally:
                        self.cleanup(b_tmp_dest_name)

        if creating:
            # make sure the file has the correct permissions
            # based on the current value of umask
            umask = os.umask(0)
            os.umask(umask)
            os.chmod(b_dest, S_IRWU_RWG_RWO & ~umask)
            dest_dir_stat = os.stat(os.path.dirname(b_dest))
            try:
                if dest_dir_stat.st_mode & stat.S_ISGID:
                    os.chown(b_dest, os.geteuid(), dest_dir_stat.st_gid)
                else:
                    os.chown(b_dest, os.geteuid(), os.getegid())
            except OSError:
                # We're okay with trying our best here. If the user is not
                # root (or old Unices) they won't be able to chown.
pass if self.selinux_enabled(): # rename might not preserve context self.set_context_if_different(dest, context, False) def _unsafe_writes(self, src, dest): # sadly there are some situations where we cannot ensure atomicity, but only if # the user insists and we get the appropriate error we update the file unsafely try: out_dest = in_src = None try: out_dest = open(dest, 'wb') in_src = open(src, 'rb') shutil.copyfileobj(in_src, out_dest) finally: # assuring closed files in 2.4 compatible way if out_dest: out_dest.close() if in_src: in_src.close() except (shutil.Error, OSError, IOError) as e: self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)), exception=traceback.format_exc()) def _clean_args(self, args): if not self._clean: # create a printable version of the command for use in reporting later, # which strips out things like passwords from the args list to_clean_args = args if isinstance(args, bytes): to_clean_args = to_text(args) if isinstance(args, (str, bytes)): to_clean_args = shlex.split(to_clean_args) clean_args = [] is_passwd = False for arg in (to_native(a) for a in to_clean_args): if is_passwd: is_passwd = False clean_args.append('********') continue if PASSWD_ARG_RE.match(arg): sep_idx = arg.find('=') if sep_idx > -1: clean_args.append('%s=********' % arg[:sep_idx]) continue else: is_passwd = True arg = heuristic_log_sanitize(arg, self.no_log_values) clean_args.append(arg) self._clean = ' '.join(shlex.quote(arg) for arg in clean_args) return self._clean def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict', expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None, ignore_invalid_cwd=True, handle_exceptions=True): """ Execute a command, returns rc, stdout, and stderr. The mechanism of this method for reading stdout and stderr differs from that of CPython subprocess.Popen.communicate, in that this method will stop reading once the spawned command has exited and stdout and stderr have been consumed, as opposed to waiting until stdout/stderr are closed. This can be an important distinction, when taken into account that a forked or backgrounded process may hold stdout or stderr open for longer than the spawned command. :arg args: is the command to run * If args is a list, the command will be run with shell=False. * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False * If args is a string and use_unsafe_shell=True it runs with shell=True. :kw check_rc: Whether to call fail_json in case of non zero RC. Default False :kw close_fds: See documentation for subprocess.Popen(). Default True :kw executable: See documentation for subprocess.Popen(). Default None :kw data: If given, information to write to the stdin of the command :kw binary_data: If False, append a newline to the data. Default False :kw path_prefix: If given, additional path to find the command in. This adds to the PATH environment variable so helper commands in the same directory can also be found :kw cwd: If given, working directory to run the command inside :kw use_unsafe_shell: See `args` parameter. 
Default False :kw prompt_regex: Regex string (not a compiled regex) which can be used to detect prompts in the stdout which would otherwise cause the execution to hang (especially if no input data is specified) :kw environ_update: dictionary to *update* environ variables with :kw umask: Umask to be used when running the command. Default None :kw encoding: Since we return native strings, on python3 we need to know the encoding to use to transform from bytes to text. If you want to always get bytes back, use encoding=None. The default is "utf-8". This does not affect transformation of strings given as args. :kw errors: Since we return native strings, on python3 we need to transform stdout and stderr from bytes to text. If the bytes are undecodable in the ``encoding`` specified, then use this error handler to deal with them. The default is ``surrogate_or_strict`` which means that the bytes will be decoded using the surrogateescape error handler if available (available on all python3 versions we support) otherwise a UnicodeError traceback will be raised. This does not affect transformations of strings given as args. :kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument dictates whether ``~`` is expanded in paths and environment variables are expanded before running the command. When ``True`` a string such as ``$SHELL`` will be expanded regardless of escaping. When ``False`` and ``use_unsafe_shell=False`` no path or variable expansion will be done. :kw pass_fds: When running on Python 3 this argument dictates which file descriptors should be passed to an underlying ``Popen`` constructor. On Python 2, this will set ``close_fds`` to False. :kw before_communicate_callback: This function will be called after ``Popen`` object will be created but before communicating to the process. (``Popen`` object will be passed to callback as a first argument) :kw ignore_invalid_cwd: This flag indicates whether an invalid ``cwd`` (non-existent or not a directory) should be ignored or should raise an exception. :kw handle_exceptions: This flag indicates whether an exception will be handled inline and issue a failed_json or if the caller should handle it. :returns: A 3-tuple of return code (integer), stdout (native string), and stderr (native string). On python2, stdout and stderr are both byte strings. On python3, stdout and stderr are text strings converted according to the encoding and errors parameters. If you want byte strings on python3, use encoding=None to turn decoding to text off. 
""" # used by clean args later on self._clean = None if not isinstance(args, (list, bytes, str)): msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) shell = False if use_unsafe_shell: # stringify args for unsafe/direct shell usage if isinstance(args, list): args = b" ".join([to_bytes(shlex.quote(x), errors='surrogate_or_strict') for x in args]) else: args = to_bytes(args, errors='surrogate_or_strict') # not set explicitly, check if set by controller if executable: executable = to_bytes(executable, errors='surrogate_or_strict') args = [executable, b'-c', args] elif self._shell not in (None, '/bin/sh'): args = [to_bytes(self._shell, errors='surrogate_or_strict'), b'-c', args] else: shell = True else: # ensure args are a list if isinstance(args, (bytes, str)): args = shlex.split(to_text(args, errors='surrogateescape')) # expand ``~`` in paths, and all environment vars if expand_user_and_vars: args = [to_bytes(os.path.expanduser(os.path.expandvars(x)), errors='surrogate_or_strict') for x in args if x is not None] else: args = [to_bytes(x, errors='surrogate_or_strict') for x in args if x is not None] prompt_re = None if prompt_regex: if isinstance(prompt_regex, str): prompt_regex = to_bytes(prompt_regex, errors='surrogateescape') try: prompt_re = re.compile(prompt_regex, re.MULTILINE) except re.error: self.fail_json(msg="invalid prompt regular expression given to run_command") rc = 0 msg = None st_in = None env = os.environ.copy() # We can set this from both an attribute and per call env.update(self.run_command_environ_update or {}) env.update(environ_update or {}) if path_prefix: path = env.get('PATH', '') if path: env['PATH'] = "%s:%s" % (path_prefix, path) else: env['PATH'] = path_prefix # If using test-module.py and explode, the remote lib path will resemble: # /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py # If using ansible or ansible-playbook with a remote system: # /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py # Clean out python paths set by ansiballz if 'PYTHONPATH' in env: pypaths = [x for x in env['PYTHONPATH'].split(':') if x and not x.endswith('/ansible_modlib.zip') and not x.endswith('/debug_dir')] if pypaths and any(pypaths): env['PYTHONPATH'] = ':'.join(pypaths) if data: st_in = subprocess.PIPE def preexec(): if umask: os.umask(umask) kwargs = dict( executable=executable, shell=shell, close_fds=close_fds, stdin=st_in, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=preexec, env=env, ) if pass_fds: kwargs["pass_fds"] = pass_fds # make sure we're in the right working directory if cwd: cwd = to_bytes(os.path.abspath(os.path.expanduser(cwd)), errors='surrogate_or_strict') if os.path.isdir(cwd): kwargs['cwd'] = cwd elif not ignore_invalid_cwd: self.fail_json(msg="Provided cwd is not a valid directory: %s" % cwd) try: if self._debug: self.log('Executing: ' + self._clean_args(args)) cmd = subprocess.Popen(args, **kwargs) if before_communicate_callback: before_communicate_callback(cmd) stdout = b'' stderr = b'' # Mirror the CPython subprocess logic and preference for the selector to use. # poll/select have the advantage of not requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). 
if hasattr(selectors, 'PollSelector'): selector = selectors.PollSelector() else: selector = selectors.SelectSelector() if data: if not binary_data: data += '\n' if isinstance(data, str): data = to_bytes(data) selector.register(cmd.stdout, selectors.EVENT_READ) selector.register(cmd.stderr, selectors.EVENT_READ) if os.name == 'posix': fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK) if data: cmd.stdin.write(data) cmd.stdin.close() while True: # A timeout of 1 is both a little short and a little long. # With None we could deadlock, with a lower value we would # waste cycles. As it is, this is a mild inconvenience if # we need to exit, and likely doesn't waste too many cycles events = selector.select(1) stdout_changed = False for key, event in events: b_chunk = key.fileobj.read(32768) if not b_chunk: selector.unregister(key.fileobj) elif key.fileobj == cmd.stdout: stdout += b_chunk stdout_changed = True elif key.fileobj == cmd.stderr: stderr += b_chunk # if we're checking for prompts, do it now, but only if stdout # actually changed since the last loop if prompt_re and stdout_changed and prompt_re.search(stdout) and not data: if encoding: stdout = to_native(stdout, encoding=encoding, errors=errors) return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") # break out if no pipes are left to read or the pipes are completely read # and the process is terminated if (not events or not selector.get_map()) and cmd.poll() is not None: break # No pipes are left to read but process is not yet terminated # Only then it is safe to wait for the process to be finished # NOTE: Actually cmd.poll() is always None here if no selectors are left elif not selector.get_map() and cmd.poll() is None: cmd.wait() # The process is terminated. Since no pipes to read from are # left, there is no need to call select() again. 
                    break

            cmd.stdout.close()
            cmd.stderr.close()
            selector.close()

            rc = cmd.returncode
        except (OSError, IOError) as e:
            self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
            if handle_exceptions:
                self.fail_json(rc=e.errno, stdout=b'', stderr=b'', msg=to_native(e), cmd=self._clean_args(args))
            else:
                raise e
        except Exception as e:
            self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
            if handle_exceptions:
                self.fail_json(rc=257, stdout=b'', stderr=b'', msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
            else:
                raise e

        if rc != 0 and check_rc:
            msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
            self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)

        if encoding is not None:
            return (rc, to_native(stdout, encoding=encoding, errors=errors),
                    to_native(stderr, encoding=encoding, errors=errors))

        return (rc, stdout, stderr)

    def append_to_file(self, filename, str):
        filename = os.path.expandvars(os.path.expanduser(filename))
        with open(filename, 'a') as fh:
            fh.write(str)

    def bytes_to_human(self, size):
        return bytes_to_human(size)

    # for backwards compatibility
    pretty_bytes = bytes_to_human

    def human_to_bytes(self, number, isbits=False):
        return human_to_bytes(number, isbits)

    #
    # Backwards compat
    #

    # In 2.0, moved from inside the module to the toplevel
    is_executable = is_executable

    @staticmethod
    def get_buffer_size(fd):
        try:
            # 1032 == F_GETPIPE_SZ
            buffer_size = fcntl.fcntl(fd, 1032)
        except Exception:
            try:
                # not as exact as above, but should be good enough for most platforms that fail the previous call
                buffer_size = select.PIPE_BUF
            except Exception:
                buffer_size = 9000  # use logical default just in case
        return buffer_size


def get_module_path():
    return os.path.dirname(os.path.realpath(__file__))


def __getattr__(importable_name):
    """Inject import-time deprecation warnings."""
    if importable_name == 'get_exception':
        from ansible.module_utils.pycompat24 import get_exception
        importable = get_exception
    elif importable_name in {'literal_eval', '_literal_eval'}:
        from ast import literal_eval
        importable = literal_eval
    elif importable_name == 'datetime':
        import datetime
        importable = datetime
    elif importable_name == 'signal':
        import signal
        importable = signal
    elif importable_name == 'types':
        import types
        importable = types
    elif importable_name == 'chain':
        from itertools import chain
        importable = chain
    elif importable_name == 'repeat':
        from itertools import repeat
        importable = repeat
    elif importable_name in {
        'PY2', 'PY3', 'b', 'binary_type', 'integer_types',
        'iteritems', 'string_types', 'text_type'
    }:
        import importlib
        importable = getattr(
            importlib.import_module('ansible.module_utils.six'),
            importable_name
        )
    elif importable_name == 'map':
        importable = map
    elif importable_name == 'shlex_quote':
        importable = shlex.quote
    else:
        raise AttributeError(
            f'cannot import name {importable_name !r} '
            f"from '{__name__}' ({__file__ !s})"
        )

    deprecate(
        msg=f"Importing '{importable_name}' from '{__name__}' is deprecated.",
        version="2.21",
    )
    return importable
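For orientation, here is a minimal sketch (not part of basic.py) of how a custom module typically drives the AnsibleModule API above; the 'path' parameter and the ls invocation are illustrative only.

# Illustrative only: a hypothetical module exercising AnsibleModule,
# run_command and exit_json from basic.py.
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='str', required=True),
        ),
    )
    # run_command returns (rc, stdout, stderr); check_rc=True makes it
    # call fail_json for us on a non-zero return code.
    rc, out, err = module.run_command(['ls', '-l', module.params['path']], check_rc=True)
    module.exit_json(changed=False, stdout_lines=out.splitlines())


if __name__ == '__main__':
    main()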
86,173
Python
.py
1,795
35.420056
158
0.573947
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,212
api.py
ansible_ansible/lib/ansible/module_utils/api.py
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright: (c) 2015, Brian Coca, <bcoca@ansible.com> # # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) """ This module adds shared support for generic api modules In order to use this module, include it as part of a custom module as shown below. The 'api' module provides the following common argument specs: * rate limit spec - rate: number of requests per time unit (int) - rate_limit: time window in which the limit is applied in seconds * retry spec - retries: number of attempts - retry_pause: delay between attempts in seconds """ from __future__ import annotations import copy import functools import itertools import secrets import sys import time import ansible.module_utils.compat.typing as t def rate_limit_argument_spec(spec=None): """Creates an argument spec for working with rate limiting""" arg_spec = (dict( rate=dict(type='int'), rate_limit=dict(type='int'), )) if spec: arg_spec.update(spec) return arg_spec def retry_argument_spec(spec=None): """Creates an argument spec for working with retrying""" arg_spec = (dict( retries=dict(type='int'), retry_pause=dict(type='float', default=1), )) if spec: arg_spec.update(spec) return arg_spec def basic_auth_argument_spec(spec=None): arg_spec = (dict( api_username=dict(type='str'), api_password=dict(type='str', no_log=True), api_url=dict(type='str'), validate_certs=dict(type='bool', default=True) )) if spec: arg_spec.update(spec) return arg_spec def rate_limit(rate=None, rate_limit=None): """rate limiting decorator""" minrate = None if rate is not None and rate_limit is not None: minrate = float(rate_limit) / float(rate) def wrapper(f): last = [0.0] def ratelimited(*args, **kwargs): if sys.version_info >= (3, 8): real_time = time.process_time else: real_time = time.clock if minrate is not None: elapsed = real_time() - last[0] left = minrate - elapsed if left > 0: time.sleep(left) last[0] = real_time() ret = f(*args, **kwargs) return ret return ratelimited return wrapper def retry(retries=None, retry_pause=1): """Retry decorator""" def wrapper(f): def retried(*args, **kwargs): retry_count = 0 if retries is not None: ret = None while True: retry_count += 1 if retry_count >= retries: raise Exception("Retry limit exceeded: %d" % retries) try: ret = f(*args, **kwargs) except Exception: pass if ret: break time.sleep(retry_pause) return ret return retried return wrapper def generate_jittered_backoff(retries=10, delay_base=3, delay_threshold=60): """The "Full Jitter" backoff strategy. Ref: https://www.awsarchitectureblog.com/2015/03/backoff.html :param retries: The number of delays to generate. :param delay_base: The base time in seconds used to calculate the exponential backoff. :param delay_threshold: The maximum time in seconds for any delay. """ for retry in range(0, retries): yield secrets.randbelow(min(delay_threshold, delay_base * 2 ** retry)) def retry_never(exception_or_result): return False def retry_with_delays_and_condition(backoff_iterator, should_retry_error=None): """Generic retry decorator. :param backoff_iterator: An iterable of delays in seconds. 
    :param should_retry_error: A callable that takes an exception of the decorated function and decides whether to retry or not (returns a bool).
    """
    def _emit_isolated_iterator_copies(original_iterator):  # type: (t.Iterable[t.Any]) -> t.Generator
        # Ref: https://stackoverflow.com/a/30232619/595220
        _copiable_iterator, _first_iterator_copy = itertools.tee(original_iterator)
        yield _first_iterator_copy
        while True:
            yield copy.copy(_copiable_iterator)
    backoff_iterator_generator = _emit_isolated_iterator_copies(backoff_iterator)
    del backoff_iterator  # prevent accidental use elsewhere

    if should_retry_error is None:
        should_retry_error = retry_never

    def function_wrapper(function):
        @functools.wraps(function)
        def run_function(*args, **kwargs):
            """This assumes the function has not already been called.
            If backoff_iterator is empty, we should still run the function a single time with no delay.
            """
            call_retryable_function = functools.partial(function, *args, **kwargs)

            for delay in next(backoff_iterator_generator):
                try:
                    return call_retryable_function()
                except Exception as e:
                    if not should_retry_error(e):
                        raise
                time.sleep(delay)

            # Only attempt (empty backoff_iterator) or final attempt
            return call_retryable_function()

        return run_function

    return function_wrapper
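A brief usage sketch for the retry helpers above (not part of api.py); TransientError and flaky_fetch are hypothetical stand-ins.

import random

from ansible.module_utils.api import generate_jittered_backoff, retry_with_delays_and_condition


class TransientError(Exception):
    pass


@retry_with_delays_and_condition(
    backoff_iterator=generate_jittered_backoff(retries=5, delay_base=1, delay_threshold=8),
    should_retry_error=lambda e: isinstance(e, TransientError),
)
def flaky_fetch():
    # TransientError triggers another attempt after a jittered delay;
    # any other exception (or exhausting the backoff) propagates.
    if random.random() < 0.5:
        raise TransientError('temporarily unavailable')
    return 'ok'


print(flaky_fetch())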
5,785
Python
.py
144
31.4375
145
0.630997
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,213
service.py
ansible_ansible/lib/ansible/module_utils/service.py
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) Ansible Inc, 2016 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import glob import os import pickle import platform import select import shlex import subprocess import traceback from ansible.module_utils.six import PY2, b from ansible.module_utils.common.text.converters import to_bytes, to_text def sysv_is_enabled(name, runlevel=None): """ This function will check if the service name supplied is enabled in any of the sysv runlevels :arg name: name of the service to test for :kw runlevel: runlevel to check (default: None) """ if runlevel: if not os.path.isdir('/etc/rc0.d/'): return bool(glob.glob('/etc/init.d/rc%s.d/S??%s' % (runlevel, name))) return bool(glob.glob('/etc/rc%s.d/S??%s' % (runlevel, name))) else: if not os.path.isdir('/etc/rc0.d/'): return bool(glob.glob('/etc/init.d/rc?.d/S??%s' % name)) return bool(glob.glob('/etc/rc?.d/S??%s' % name)) def get_sysv_script(name): """ This function will return the expected path for an init script corresponding to the service name supplied. :arg name: name or path of the service to test for """ if name.startswith('/'): result = name else: result = '/etc/init.d/%s' % name return result def sysv_exists(name): """ This function will return True or False depending on the existence of an init script corresponding to the service name supplied. 
    :arg name: name of the service to test for
    """
    return os.path.exists(get_sysv_script(name))


def get_ps(module, pattern):
    """
    Last resort to find a service by trying to match pattern
    to programs in memory
    """
    found = False
    if platform.system() == 'SunOS':
        flags = '-ef'
    else:
        flags = 'auxww'
    psbin = module.get_bin_path('ps', True)

    (rc, psout, pserr) = module.run_command([psbin, flags])
    if rc == 0:
        for line in psout.splitlines():
            if pattern in line:
                # FIXME: should add logic to prevent matching 'self', though that should be extremely rare
                found = True
                break
    return found


def fail_if_missing(module, found, service, msg=''):
    """
    This function will fail the module with an error message if the
    requested service was not found.

    :arg module: is an AnsibleModule object, used for its utility methods
    :arg found: boolean indicating if services were found or not
    :arg service: name of service
    :kw msg: extra info to append to error/success msg when missing
    """
    if not found:
        module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))


def fork_process():
    """
    This function performs the double fork process to detach from the
    parent process and execute.
    """
    pid = os.fork()

    if pid == 0:
        # Set stdin/stdout/stderr to /dev/null
        fd = os.open(os.devnull, os.O_RDWR)

        # clone stdin/out/err
        for num in range(3):
            if fd != num:
                os.dup2(fd, num)

        # close otherwise
        if fd not in range(3):
            os.close(fd)

        # Make us a daemon
        pid = os.fork()

        # end if not in child
        if pid > 0:
            os._exit(0)

        # get new process session and detach
        os.setsid()

        # avoid possible problems with cwd being removed
        os.chdir("/")

        pid = os.fork()
        if pid > 0:
            os._exit(0)

    return pid


def daemonize(module, cmd):
    """
    Execute a command while detaching as a daemon, returns rc, stdout, and stderr.

    :arg module: is an AnsibleModule object, used for its utility methods
    :arg cmd: is a list or string representing the command and options to run

    This is complex because daemonization is hard for people.
    What we do is daemonize a part of this module, the daemon runs the command,
    picks up the return code and output, and returns it to the main process.
    """

    # init some vars
    chunk = 4096  # FIXME: pass in as arg?
    errors = 'surrogate_or_strict'

    # start it!
    try:
        pipe = os.pipe()
        pid = fork_process()
    except (OSError, RuntimeError) as e:
        module.fail_json(msg="Error while attempting to fork: %s" % to_text(e), exception=traceback.format_exc())

    # we don't do any locking as this should be a unique module/process
    if pid == 0:
        os.close(pipe[0])

        # if command is string deal with py2 vs py3 conversions for shlex
        if not isinstance(cmd, list):
            if PY2:
                cmd = shlex.split(to_bytes(cmd, errors=errors))
            else:
                cmd = shlex.split(to_text(cmd, errors=errors))

        # make sure we always use byte strings
        run_cmd = []
        for c in cmd:
            run_cmd.append(to_bytes(c, errors=errors))

        # execute the command in forked process
        p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
        fds = [p.stdout, p.stderr]

        # loop reading output till it is done
        output = {p.stdout: b(""), p.stderr: b("")}
        while fds:
            rfd, wfd, efd = select.select(fds, [], fds, 1)
            if (rfd + wfd + efd) or p.poll() is None:
                for out in list(fds):
                    if out in rfd:
                        data = os.read(out.fileno(), chunk)
                        if data:
                            output[out] += to_bytes(data, errors=errors)
                        else:
                            fds.remove(out)
            else:
                break

        # even after fds close, we might want to wait for pid to die
        p.wait()

        # Return pickled data to the parent
        return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
        os.write(pipe[1], to_bytes(return_data, errors=errors))

        # clean up
        os.close(pipe[1])
        os._exit(0)

    elif pid == -1:
        module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")

    else:
        # in parent
        os.close(pipe[1])
        os.waitpid(pid, 0)

        # Grab response data after child finishes
        return_data = b("")
        while True:
            rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
            if pipe[0] in rfd:
                data = os.read(pipe[0], chunk)
                if not data:
                    break
                return_data += to_bytes(data, errors=errors)

        # Note: no need to specify encoding on py3 as this module sends the
        # pickle to itself (thus same python interpreter so we aren't mixing
        # py2 and py3)
        return pickle.loads(to_bytes(return_data, errors=errors))


def check_ps(module, pattern):

    # Set ps flags
    if platform.system() == 'SunOS':
        psflags = '-ef'
    else:
        psflags = 'auxww'

    # Find ps binary
    psbin = module.get_bin_path('ps', True)

    (rc, out, err) = module.run_command('%s %s' % (psbin, psflags))
    # If rc is 0, set running as appropriate
    if rc == 0:
        for line in out.split('\n'):
            if pattern in line:
                return True
    return False


def is_systemd_managed(module):
    """
    Find out if the machine supports systemd or not

    :arg module: is an AnsibleModule object, used for its utility methods

    Returns True if the system supports systemd, False if not.
    """
    # tools must be installed
    if module.get_bin_path('systemctl'):

        # This should show if systemd is the boot init system, if checking init failed to mark as systemd
        # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
        for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
            if os.path.exists(canary):
                return True

        # If all else fails, check if init is the systemd command, using comm as cmdline could be symlink
        try:
            with open('/proc/1/comm', 'r') as init_proc:
                init = init_proc.readline().strip()
            return init == 'systemd'
        except IOError:
            # If comm doesn't exist, old kernel, no systemd
            return False

    return False
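A short usage sketch for the helpers above (not part of service.py); the 'name' parameter and the init-script invocation are illustrative.

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import daemonize, fail_if_missing, get_sysv_script, sysv_exists


def main():
    module = AnsibleModule(argument_spec=dict(name=dict(type='str', required=True)))
    name = module.params['name']

    # fail with a clear message when no init script exists for the service
    fail_if_missing(module, sysv_exists(name), name, msg='no init script found')

    # daemonize() double-forks, runs the command detached from Ansible and
    # pickles (rc, stdout, stderr) back to this process over a pipe
    rc, out, err = daemonize(module, [get_sysv_script(name), 'start'])
    module.exit_json(changed=(rc == 0), rc=rc, stdout=out, stderr=err)


if __name__ == '__main__':
    main()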
10,141
Python
.py
241
34.481328
138
0.638183
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,214
connection.py
ansible_ansible/lib/ansible/module_utils/connection.py
# # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # (c) 2017 Red Hat Inc. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import os import json import pickle import socket import struct import traceback import uuid from functools import partial from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.module_utils.common.json import AnsibleJSONEncoder from ansible.module_utils.six import iteritems def write_to_stream(stream, obj): """Write a length+newline-prefixed pickled object to a stream.""" src = pickle.dumps(obj) stream.write(b'%d\n' % len(src)) stream.write(src) def send_data(s, data): packed_len = struct.pack('!Q', len(data)) return s.sendall(packed_len + data) def recv_data(s): header_len = 8 # size of a packed unsigned long long data = to_bytes("") while len(data) < header_len: d = s.recv(header_len - len(data)) if not d: return None data += d data_len = struct.unpack('!Q', data[:header_len])[0] data = data[header_len:] while len(data) < data_len: d = s.recv(data_len - len(data)) if not d: return None data += d return data def exec_command(module, command): connection = Connection(module._socket_path) try: out = connection.exec_command(command) except ConnectionError as exc: code = getattr(exc, 'code', 1) message = getattr(exc, 'err', exc) return code, '', to_text(message, errors='surrogate_then_replace') return 0, out, '' def request_builder(method_, *args, **kwargs): reqid = str(uuid.uuid4()) req = {'jsonrpc': '2.0', 'method': method_, 'id': reqid} req['params'] = (args, kwargs) return req class ConnectionError(Exception): def __init__(self, message, *args, **kwargs): super(ConnectionError, self).__init__(message) for k, v in iteritems(kwargs): setattr(self, k, v) class Connection(object): def __init__(self, socket_path): if socket_path is None: raise AssertionError('socket_path must be a value') self.socket_path = socket_path def __getattr__(self, name): try: return self.__dict__[name] except KeyError: if name.startswith('_'): raise 
AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) return partial(self.__rpc__, name) def _exec_jsonrpc(self, name, *args, **kwargs): req = request_builder(name, *args, **kwargs) reqid = req['id'] if not os.path.exists(self.socket_path): raise ConnectionError( 'socket path %s does not exist or cannot be found. See Troubleshooting socket ' 'path issues in the Network Debug and Troubleshooting Guide' % self.socket_path ) try: data = json.dumps(req, cls=AnsibleJSONEncoder, vault_to_text=True) except TypeError as exc: raise ConnectionError( "Failed to encode some variables as JSON for communication with the persistent connection helper. " "The original exception was: %s" % to_text(exc) ) try: out = self.send(data) except socket.error as e: raise ConnectionError( 'unable to connect to socket %s. See Troubleshooting socket path issues ' 'in the Network Debug and Troubleshooting Guide' % self.socket_path, err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc() ) try: response = json.loads(out) except ValueError: # set_option(s) has sensitive info, and the details are unlikely to matter anyway if name.startswith("set_option"): raise ConnectionError( "Unable to decode JSON from response to {0}. Received '{1}'.".format(name, out) ) params = [repr(arg) for arg in args] + ['{0}={1!r}'.format(k, v) for k, v in iteritems(kwargs)] params = ', '.join(params) raise ConnectionError( "Unable to decode JSON from response to {0}({1}). Received '{2}'.".format(name, params, out) ) if response['id'] != reqid: raise ConnectionError('invalid json-rpc id received') if "result_type" in response: response["result"] = pickle.loads(to_bytes(response["result"], errors="surrogateescape")) return response def __rpc__(self, name, *args, **kwargs): """Executes the json-rpc and returns the output received from remote device. :name: rpc method to be executed over connection plugin that implements jsonrpc 2.0 :args: Ordered list of params passed as arguments to rpc method :kwargs: Dict of valid key, value pairs passed as arguments to rpc method For usage refer the respective connection plugin docs. """ response = self._exec_jsonrpc(name, *args, **kwargs) if 'error' in response: err = response.get('error') msg = err.get('data') or err['message'] code = err['code'] raise ConnectionError(to_text(msg, errors='surrogate_then_replace'), code=code) return response['result'] def send(self, data): try: sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sf.connect(self.socket_path) send_data(sf, to_bytes(data)) response = recv_data(sf) except socket.error as e: sf.close() raise ConnectionError( 'unable to connect to socket %s. See the socket path issue category in ' 'Network Debug and Troubleshooting Guide' % self.socket_path, err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc() ) sf.close() return to_text(response, errors='surrogate_or_strict')
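A small, self-contained sketch (not part of connection.py) of the length-prefixed framing used by send_data()/recv_data(), run over a socketpair so no persistent connection daemon is needed.

import socket

from ansible.module_utils.connection import recv_data, send_data

a, b = socket.socketpair()
# send_data prefixes the payload with an 8-byte ('!Q') length header
send_data(a, b'hello persistent connection')
assert recv_data(b) == b'hello persistent connection'
a.close()
b.close()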
7,671
Python
.py
165
38.442424
115
0.649076
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,215
_text.py
ansible_ansible/lib/ansible/module_utils/_text.py
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
"""
.. warning:: Use ansible.module_utils.common.text.converters instead.
"""
from __future__ import annotations

# Backwards compat for people still calling it from this package
# pylint: disable=unused-import
import codecs

from ansible.module_utils.six import PY3, text_type, binary_type

from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
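A minimal sketch of the replacement API that the warning above points to; the byte values shown are plain UTF-8 round-trips.

from ansible.module_utils.common.text.converters import to_bytes, to_text

assert to_text(b'caf\xc3\xa9') == 'café'   # bytes -> text (UTF-8 by default)
assert to_bytes('café') == b'caf\xc3\xa9'  # text -> bytes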
544
Python
.py
11
48.090909
106
0.797732
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,216
convert_bool.py
ansible_ansible/lib/ansible/module_utils/parsing/convert_bool.py
# Copyright: 2017, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause ) from __future__ import annotations from ansible.module_utils.six import binary_type, text_type from ansible.module_utils.common.text.converters import to_text BOOLEANS_TRUE = frozenset(('y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True)) BOOLEANS_FALSE = frozenset(('n', 'no', 'off', '0', 'false', 'f', 0, 0.0, False)) BOOLEANS = BOOLEANS_TRUE.union(BOOLEANS_FALSE) def boolean(value, strict=True): if isinstance(value, bool): return value normalized_value = value if isinstance(value, (text_type, binary_type)): normalized_value = to_text(value, errors='surrogate_or_strict').lower().strip() if normalized_value in BOOLEANS_TRUE: return True elif normalized_value in BOOLEANS_FALSE or not strict: return False raise TypeError("The value '%s' is not a valid boolean. Valid booleans include: %s" % (to_text(value), ', '.join(repr(i) for i in BOOLEANS)))
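A short sketch of strict versus non-strict conversion with the boolean() helper above (not part of convert_bool.py).

from ansible.module_utils.parsing.convert_bool import boolean

assert boolean('Yes') is True
assert boolean('off') is False
assert boolean(1.0) is True

# strict=False maps any unrecognised value to False instead of raising
assert boolean('definitely', strict=False) is False

try:
    boolean('definitely')  # strict (the default) raises TypeError
except TypeError:
    pass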
1,061
Python
.py
19
51.421053
146
0.703775
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,217
__init__.py
ansible_ansible/lib/ansible/module_utils/six/__init__.py
# This code is strewn with things that are not defined on Python3 (unicode, # long, etc) but they are all shielded by version checks. This is also an # upstream vendored file that we're not going to modify on our own # pylint: disable=undefined-variable # # Copyright (c) 2010-2020 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Utilities for writing code that runs on Python 2 and 3""" from __future__ import annotations import functools import itertools import operator import sys import types # The following makes it easier for us to script updates of the bundled code. It is not part of # upstream six _BUNDLED_METADATA = {"pypi_name": "six", "version": "1.16.0"} __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.16.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X if PY34: from importlib.util import spec_from_loader else: spec_from_loader = None def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. 
delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def find_spec(self, fullname, path, target=None): if fullname in self.known_modules: return spec_from_loader(fullname, self) return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code def create_module(self, spec): return self.load_module(spec.name) def exec_module(self, module): pass _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", 
"tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("splitvalue", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + 
".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), MovedAttribute("parse_http_list", "urllib2", "urllib.request"), MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in 
six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) 
get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO del io _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _assertNotRegex = "assertNotRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" _assertNotRegex = "assertNotRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _assertNotRegex = "assertNotRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) def assertNotRegex(self, *args, **kwargs): return getattr(self, _assertNotRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): try: if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value finally: value = None tb = None else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): try: raise tp, value, tb finally: tb = None """) if sys.version_info[:2] > (3,): exec_("""def raise_from(value, from_value): try: raise value from from_value finally: value = None """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def 
print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): # This does exactly the same what the :func:`py3:functools.update_wrapper` # function does on Python versions after 3.2. It sets the ``__wrapped__`` # attribute on ``wrapper`` object and it doesn't raise an error if any of # the attributes mentioned in ``assigned`` and ``updated`` are missing on # ``wrapped`` object. def _update_wrapper(wrapper, wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): for attr in assigned: try: value = getattr(wrapped, attr) except AttributeError: continue else: setattr(wrapper, attr, value) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) wrapper.__wrapped__ = wrapped return wrapper _update_wrapper.__doc__ = functools.update_wrapper.__doc__ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): return functools.partial(_update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated) wraps.__doc__ = functools.wraps.__doc__ else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(type): def __new__(cls, name, this_bases, d): if sys.version_info[:2] >= (3, 7): # This version introduced PEP 560 that requires a bit # of extra care (we mimic what is done by __build_class__). 
resolved_bases = types.resolve_bases(bases) if resolved_bases is not bases: d['__orig_bases__'] = bases else: resolved_bases = bases return meta(name, resolved_bases, d) @classmethod def __prepare__(cls, name, this_bases): return meta.__prepare__(name, bases) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) if hasattr(cls, '__qualname__'): orig_vars['__qualname__'] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def ensure_binary(s, encoding='utf-8', errors='strict'): """Coerce **s** to six.binary_type. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> encoded to `bytes` - `bytes` -> `bytes` """ if isinstance(s, binary_type): return s if isinstance(s, text_type): return s.encode(encoding, errors) raise TypeError("not expecting type '%s'" % type(s)) def ensure_str(s, encoding='utf-8', errors='strict'): """Coerce *s* to `str`. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str` """ # Optimization: Fast return for the common case. if type(s) is str: return s if PY2 and isinstance(s, text_type): return s.encode(encoding, errors) elif PY3 and isinstance(s, binary_type): return s.decode(encoding, errors) elif not isinstance(s, (text_type, binary_type)): raise TypeError("not expecting type '%s'" % type(s)) return s def ensure_text(s, encoding='utf-8', errors='strict'): """Coerce *s* to six.text_type. For Python 2: - `unicode` -> `unicode` - `str` -> `unicode` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str` """ if isinstance(s, binary_type): return s.decode(encoding, errors) elif isinstance(s, text_type): return s else: raise TypeError("not expecting type '%s'" % type(s)) def python_2_unicode_compatible(klass): """ A class decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. 
Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
34,978
Python
.py
816
35.903186
118
0.644588
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
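A minimal usage sketch for the vendored six module in the record above (the import paths come from this record's file_path; the example class and strings are illustrative, not from the source):

# Hedged sketch: exercising six.moves, add_metaclass, and ensure_text/ensure_binary.
from ansible.module_utils import six
# Resolved lazily by the _SixMetaPathImporter registered at module load.
from ansible.module_utils.six.moves.urllib.parse import urlencode

# moves: one import that maps to urllib.parse on Python 3 and urllib on Python 2
print(urlencode({"q": "ansible"}))  # q=ansible

# add_metaclass: apply a metaclass without the Python-2/3 syntax split
@six.add_metaclass(type)
class Example(object):
    pass

# ensure_text/ensure_binary: predictable bytes/text coercion
assert six.ensure_text(b"abc") == "abc"
assert six.ensure_binary("abc") == b"abc"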
14,218
warnings.py
ansible_ansible/lib/ansible/module_utils/common/warnings.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

from ansible.module_utils.six import string_types

_global_warnings = []
_global_deprecations = []


def warn(warning):
    if isinstance(warning, string_types):
        _global_warnings.append(warning)
    else:
        raise TypeError("warn requires a string not a %s" % type(warning))


def deprecate(msg, version=None, date=None, collection_name=None):
    if isinstance(msg, string_types):
        # For compatibility, we accept that neither version nor date is set,
        # and treat that the same as if version would have been set
        if date is not None:
            _global_deprecations.append({'msg': msg, 'date': date, 'collection_name': collection_name})
        else:
            _global_deprecations.append({'msg': msg, 'version': version, 'collection_name': collection_name})
    else:
        raise TypeError("deprecate requires a string not a %s" % type(msg))


def get_warning_messages():
    """Return a tuple of warning messages accumulated over this run"""
    return tuple(_global_warnings)


def get_deprecation_messages():
    """Return a tuple of deprecations accumulated over this run"""
    return tuple(_global_deprecations)
1,365
Python
.py
28
43.357143
109
0.706637
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
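The helpers above simply accumulate messages in module-level lists; a short sketch of the intended round trip (import path taken from this record, message strings illustrative):

# Sketch: accumulate warnings/deprecations, then collect them for module output.
from ansible.module_utils.common.warnings import (
    deprecate, get_deprecation_messages, get_warning_messages, warn,
)

warn("example warning")                            # appended to _global_warnings
deprecate("'foo' is going away", version="2.19")   # version/date are both optional

assert get_warning_messages() == ("example warning",)
assert get_deprecation_messages()[0]["version"] == "2.19"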
14,219
arg_spec.py
ansible_ansible/lib/ansible/module_utils/common/arg_spec.py
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

from copy import deepcopy

from ansible.module_utils.common.parameters import (
    _ADDITIONAL_CHECKS,
    _get_legal_inputs,
    _get_unsupported_parameters,
    _handle_aliases,
    _list_deprecations,
    _list_no_log_values,
    _set_defaults,
    _validate_argument_types,
    _validate_argument_values,
    _validate_sub_spec,
    set_fallbacks,
)

from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.warnings import deprecate, warn

from ansible.module_utils.common.validation import (
    check_mutually_exclusive,
    check_required_arguments,
)

from ansible.module_utils.errors import (
    AliasError,
    AnsibleValidationErrorMultiple,
    DeprecationError,
    MutuallyExclusiveError,
    NoLogError,
    RequiredDefaultError,
    RequiredError,
    UnsupportedError,
)


class ValidationResult:
    """Result of argument spec validation.

    This is the object returned by :func:`ArgumentSpecValidator.validate()
    <ansible.module_utils.common.arg_spec.ArgumentSpecValidator.validate()>`
    containing the validated parameters and any errors.
    """

    def __init__(self, parameters):
        """
        :arg parameters: Terms to be validated and coerced to the correct type.
        :type parameters: dict
        """
        self._no_log_values = set()
        """:class:`set` of values marked as ``no_log`` in the argument spec. This
        is a temporary holding place for these values and may move in the future.
        """

        self._unsupported_parameters = set()
        self._supported_parameters = dict()
        self._validated_parameters = deepcopy(parameters)
        self._deprecations = []
        self._warnings = []
        self._aliases = {}
        self.errors = AnsibleValidationErrorMultiple()
        """
        :class:`~ansible.module_utils.errors.AnsibleValidationErrorMultiple` containing all
        :class:`~ansible.module_utils.errors.AnsibleValidationError` objects if there were
        any failures during validation.
        """

    @property
    def validated_parameters(self):
        """Validated and coerced parameters."""
        return self._validated_parameters

    @property
    def unsupported_parameters(self):
        """:class:`set` of unsupported parameter names."""
        return self._unsupported_parameters

    @property
    def error_messages(self):
        """:class:`list` of all error messages from each exception in :attr:`errors`."""
        return self.errors.messages


class ArgumentSpecValidator:
    """Argument spec validation class

    Creates a validator based on the ``argument_spec`` that can be used to
    validate a number of parameters using the :meth:`validate` method.
    """

    def __init__(self, argument_spec,
                 mutually_exclusive=None,
                 required_together=None,
                 required_one_of=None,
                 required_if=None,
                 required_by=None,
                 ):

        """
        :arg argument_spec: Specification of valid parameters and their type. May
            include nested argument specs.
        :type argument_spec: dict[str, dict]

        :kwarg mutually_exclusive: List or list of lists of terms that should not
            be provided together.
        :type mutually_exclusive: list[str] or list[list[str]]

        :kwarg required_together: List of lists of terms that are required together.
        :type required_together: list[list[str]]

        :kwarg required_one_of: List of lists of terms, one of which in each list
            is required.
        :type required_one_of: list[list[str]]

        :kwarg required_if: List of lists of ``[parameter, value, [parameters]]`` where
            one of ``[parameters]`` is required if ``parameter == value``.
        :type required_if: list

        :kwarg required_by: Dictionary of parameter names that contain a list of
            parameters required by each key in the dictionary.
        :type required_by: dict[str, list[str]]
        """

        self._mutually_exclusive = mutually_exclusive
        self._required_together = required_together
        self._required_one_of = required_one_of
        self._required_if = required_if
        self._required_by = required_by
        self._valid_parameter_names = set()
        self.argument_spec = argument_spec

        for key in sorted(self.argument_spec.keys()):
            aliases = self.argument_spec[key].get('aliases')
            if aliases:
                self._valid_parameter_names.update(["{key} ({aliases})".format(key=key, aliases=", ".join(sorted(aliases)))])
            else:
                self._valid_parameter_names.update([key])

    def validate(self, parameters, *args, **kwargs):
        """Validate ``parameters`` against argument spec.

        Error messages in the :class:`ValidationResult` may contain no_log values and should be
        sanitized with :func:`~ansible.module_utils.common.parameters.sanitize_keys` before logging or displaying.

        :arg parameters: Parameters to validate against the argument spec
        :type parameters: dict[str, dict]

        :return: :class:`ValidationResult` containing validated parameters.

        :Simple Example:

            .. code-block:: text

                argument_spec = {
                    'name': {'type': 'str'},
                    'age': {'type': 'int'},
                }

                parameters = {
                    'name': 'bo',
                    'age': '42',
                }

                validator = ArgumentSpecValidator(argument_spec)
                result = validator.validate(parameters)

                if result.error_messages:
                    sys.exit("Validation failed: {0}".format(", ".join(result.error_messages)))

                valid_params = result.validated_parameters
        """

        result = ValidationResult(parameters)

        result._no_log_values.update(set_fallbacks(self.argument_spec, result._validated_parameters))

        alias_warnings = []
        alias_deprecations = []
        try:
            result._aliases.update(_handle_aliases(self.argument_spec, result._validated_parameters, alias_warnings, alias_deprecations))
        except (TypeError, ValueError) as e:
            result.errors.append(AliasError(to_native(e)))

        legal_inputs = _get_legal_inputs(self.argument_spec, result._validated_parameters, result._aliases)

        for option, alias in alias_warnings:
            result._warnings.append({'option': option, 'alias': alias})

        for deprecation in alias_deprecations:
            result._deprecations.append({
                'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
                'version': deprecation.get('version'),
                'date': deprecation.get('date'),
                'collection_name': deprecation.get('collection_name'),
            })

        try:
            result._no_log_values.update(_list_no_log_values(self.argument_spec, result._validated_parameters))
        except TypeError as te:
            result.errors.append(NoLogError(to_native(te)))

        try:
            result._deprecations.extend(_list_deprecations(self.argument_spec, result._validated_parameters))
        except TypeError as te:
            result.errors.append(DeprecationError(to_native(te)))

        try:
            result._unsupported_parameters.update(
                _get_unsupported_parameters(
                    self.argument_spec,
                    result._validated_parameters,
                    legal_inputs,
                    store_supported=result._supported_parameters,
                )
            )
        except TypeError as te:
            result.errors.append(RequiredDefaultError(to_native(te)))
        except ValueError as ve:
            result.errors.append(AliasError(to_native(ve)))

        try:
            check_mutually_exclusive(self._mutually_exclusive, result._validated_parameters)
        except TypeError as te:
            result.errors.append(MutuallyExclusiveError(to_native(te)))

        result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters, False))

        try:
            check_required_arguments(self.argument_spec, result._validated_parameters)
        except TypeError as e:
            result.errors.append(RequiredError(to_native(e)))

        _validate_argument_types(self.argument_spec, result._validated_parameters, errors=result.errors)
        _validate_argument_values(self.argument_spec, result._validated_parameters, errors=result.errors)

        for check in _ADDITIONAL_CHECKS:
            try:
                check['func'](getattr(self, "_{attr}".format(attr=check['attr'])), result._validated_parameters)
            except TypeError as te:
                result.errors.append(check['err'](to_native(te)))

        result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters))

        alias_deprecations = []
        _validate_sub_spec(self.argument_spec, result._validated_parameters,
                           errors=result.errors,
                           no_log_values=result._no_log_values,
                           unsupported_parameters=result._unsupported_parameters,
                           supported_parameters=result._supported_parameters,
                           alias_deprecations=alias_deprecations,)
        for deprecation in alias_deprecations:
            result._deprecations.append({
                'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
                'version': deprecation.get('version'),
                'date': deprecation.get('date'),
                'collection_name': deprecation.get('collection_name'),
            })

        if result._unsupported_parameters:
            flattened_names = []
            for item in result._unsupported_parameters:
                if isinstance(item, tuple):
                    flattened_names.append(".".join(item))
                else:
                    flattened_names.append(item)

            unsupported_string = ", ".join(sorted(list(flattened_names)))
            supported_params = supported_aliases = []
            if result._supported_parameters.get(item):
                supported_params = sorted(list(result._supported_parameters[item][0]))
                supported_aliases = sorted(list(result._supported_parameters[item][1]))
            supported_string = ", ".join(supported_params)
            if supported_aliases:
                aliases_string = ", ".join(supported_aliases)
                supported_string += " (%s)" % aliases_string

            msg = "{0}. Supported parameters include: {1}.".format(unsupported_string, supported_string)
            result.errors.append(UnsupportedError(msg))

        return result


class ModuleArgumentSpecValidator(ArgumentSpecValidator):
    """Argument spec validation class used by :class:`AnsibleModule`.

    This is not meant to be used outside of :class:`AnsibleModule`. Use
    :class:`ArgumentSpecValidator` instead.
    """

    def __init__(self, *args, **kwargs):
        super(ModuleArgumentSpecValidator, self).__init__(*args, **kwargs)

    def validate(self, parameters):
        result = super(ModuleArgumentSpecValidator, self).validate(parameters)

        for d in result._deprecations:
            deprecate(d['msg'],
                      version=d.get('version'), date=d.get('date'),
                      collection_name=d.get('collection_name'))

        for w in result._warnings:
            warn('Both option {option} and its alias {alias} are set.'.format(option=w['option'], alias=w['alias']))

        return result
12,012
Python
.py
245
38.17551
137
0.632285
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
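A runnable version of the "Simple Example" from the validate() docstring above (note the validator coerces the string '42' to the int 42; import path taken from this record):

# Runnable form of the docstring example for ArgumentSpecValidator.
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

argument_spec = {
    'name': {'type': 'str'},
    'age': {'type': 'int'},
}
validator = ArgumentSpecValidator(argument_spec)
result = validator.validate({'name': 'bo', 'age': '42'})

if result.error_messages:
    raise SystemExit("Validation failed: {0}".format(", ".join(result.error_messages)))

assert result.validated_parameters == {'name': 'bo', 'age': 42}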
14,220
network.py
ansible_ansible/lib/ansible/module_utils/common/network.py
# Copyright (c) 2016 Red Hat Inc
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

# General networking tools that may be used by all modules

from __future__ import annotations

import re

from struct import pack
from socket import inet_ntoa

from ansible.module_utils.six.moves import zip


VALID_MASKS = [2**8 - 2**i for i in range(0, 9)]


def is_netmask(val):
    parts = str(val).split('.')
    if not len(parts) == 4:
        return False
    for part in parts:
        try:
            if int(part) not in VALID_MASKS:
                raise ValueError
        except ValueError:
            return False
    return True


def is_masklen(val):
    try:
        return 0 <= int(val) <= 32
    except ValueError:
        return False


def to_netmask(val):
    """ converts a masklen to a netmask """
    if not is_masklen(val):
        raise ValueError('invalid value for masklen')

    bits = 0
    for i in range(32 - int(val), 32):
        bits |= (1 << i)

    return inet_ntoa(pack('>I', bits))


def to_masklen(val):
    """ converts a netmask to a masklen """
    if not is_netmask(val):
        raise ValueError('invalid value for netmask: %s' % val)

    bits = list()
    for x in val.split('.'):
        octet = bin(int(x)).count('1')
        bits.append(octet)

    return sum(bits)


def to_subnet(addr, mask, dotted_notation=False):
    """ converts an addr / mask pair to a subnet in cidr notation """
    try:
        if not is_masklen(mask):
            raise ValueError
        cidr = int(mask)
        mask = to_netmask(mask)
    except ValueError:
        cidr = to_masklen(mask)

    addr = addr.split('.')
    mask = mask.split('.')

    network = list()
    for s_addr, s_mask in zip(addr, mask):
        network.append(str(int(s_addr) & int(s_mask)))

    if dotted_notation:
        return '%s %s' % ('.'.join(network), to_netmask(cidr))
    return '%s/%s' % ('.'.join(network), cidr)


def to_ipv6_subnet(addr):
    """ IPv6 addresses are eight groupings. The first four groupings (64 bits)
    comprise the subnet address. """
    # https://tools.ietf.org/rfc/rfc2374.txt

    # Split by :: to identify omitted zeros
    ipv6_prefix = addr.split('::')[0]

    # Get the first four groups, or as many as are found + ::
    found_groups = []
    for group in ipv6_prefix.split(':'):
        found_groups.append(group)
        if len(found_groups) == 4:
            break
    if len(found_groups) < 4:
        found_groups.append('::')

    # Concatenate network address parts
    network_addr = ''
    for group in found_groups:
        if group != '::':
            network_addr += str(group)
        network_addr += str(':')

    # Ensure network address ends with ::
    if not network_addr.endswith('::'):
        network_addr += str(':')
    return network_addr


def to_ipv6_network(addr):
    """ IPv6 addresses are eight groupings. The first three groupings (48 bits)
    comprise the network address. """
    # Split by :: to identify omitted zeros
    ipv6_prefix = addr.split('::')[0]

    # Get the first three groups, or as many as are found + ::
    found_groups = []
    for group in ipv6_prefix.split(':'):
        found_groups.append(group)
        if len(found_groups) == 3:
            break
    if len(found_groups) < 3:
        found_groups.append('::')

    # Concatenate network address parts
    network_addr = ''
    for group in found_groups:
        if group != '::':
            network_addr += str(group)
        network_addr += str(':')

    # Ensure network address ends with ::
    if not network_addr.endswith('::'):
        network_addr += str(':')
    return network_addr


def to_bits(val):
    """ converts a netmask to bits """
    bits = ''
    for octet in val.split('.'):
        bits += bin(int(octet))[2:].zfill(8)
    return bits


def is_mac(mac_address):
    """ Validate MAC address for given string
    Args:
        mac_address: string to validate as MAC address
    Returns: (Boolean) True if string is valid MAC address, otherwise False
    """
    mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
    return bool(mac_addr_regex.match(mac_address.lower()))
4,226
Python
.py
119
29.193277
113
0.614117
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
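A few spot checks of the conversion helpers above (addresses are illustrative; import path taken from this record):

# Spot checks for the netmask/CIDR helpers defined in network.py.
from ansible.module_utils.common.network import is_mac, to_masklen, to_netmask, to_subnet

assert to_netmask(24) == '255.255.255.0'
assert to_masklen('255.255.255.0') == 24
assert to_subnet('192.168.1.10', 24) == '192.168.1.0/24'
assert to_subnet('192.168.1.10', '255.255.255.0', dotted_notation=True) == '192.168.1.0 255.255.255.0'
assert is_mac('aa:bb:cc:dd:ee:ff') and not is_mac('not-a-mac')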
14,221
parameters.py
ansible_ansible/lib/ansible/module_utils/common/parameters.py
# -*- coding: utf-8 -*- # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import annotations import datetime import os from collections import deque from itertools import chain from ansible.module_utils.common.collections import is_iterable from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.common.warnings import warn from ansible.module_utils.errors import ( AliasError, AnsibleFallbackNotFound, AnsibleValidationErrorMultiple, ArgumentTypeError, ArgumentValueError, ElementError, MutuallyExclusiveError, NoLogError, RequiredByError, RequiredError, RequiredIfError, RequiredOneOfError, RequiredTogetherError, SubParameterTypeError, ) from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE from ansible.module_utils.six.moves.collections_abc import ( KeysView, Set, Sequence, Mapping, MutableMapping, MutableSet, MutableSequence, ) from ansible.module_utils.six import ( binary_type, integer_types, string_types, text_type, PY2, PY3, ) from ansible.module_utils.common.validation import ( check_mutually_exclusive, check_required_arguments, check_required_together, check_required_one_of, check_required_if, check_required_by, check_type_bits, check_type_bool, check_type_bytes, check_type_dict, check_type_float, check_type_int, check_type_jsonarg, check_type_list, check_type_path, check_type_raw, check_type_str, ) # Python2 & 3 way to get NoneType NoneType = type(None) _ADDITIONAL_CHECKS = ( {'func': check_required_together, 'attr': 'required_together', 'err': RequiredTogetherError}, {'func': check_required_one_of, 'attr': 'required_one_of', 'err': RequiredOneOfError}, {'func': check_required_if, 'attr': 'required_if', 'err': RequiredIfError}, {'func': check_required_by, 'attr': 'required_by', 'err': RequiredByError}, ) # if adding boolean attribute, also add to PASS_BOOL # some of this dupes defaults from controller config # keep in sync with copy in lib/ansible/module_utils/csharp/Ansible.Basic.cs PASS_VARS = { 'check_mode': ('check_mode', False), 'debug': ('_debug', False), 'diff': ('_diff', False), 'keep_remote_files': ('_keep_remote_files', False), 'ignore_unknown_opts': ('_ignore_unknown_opts', False), 'module_name': ('_name', None), 'no_log': ('no_log', False), 'remote_tmp': ('_remote_tmp', None), 'target_log_info': ('_target_log_info', None), 'selinux_special_fs': ('_selinux_special_fs', ['fuse', 'nfs', 'vboxsf', 'ramfs', '9p', 'vfat']), 'shell_executable': ('_shell', '/bin/sh'), 'socket': ('_socket_path', None), 'string_conversion_action': ('_string_conversion_action', 'warn'), 'syslog_facility': ('_syslog_facility', 'INFO'), 'tmpdir': ('_tmpdir', None), 'verbosity': ('_verbosity', 0), 'version': ('ansible_version', '0.0'), } PASS_BOOLS = ('check_mode', 'debug', 'diff', 'keep_remote_files', 'ignore_unknown_opts', 'no_log') DEFAULT_TYPE_VALIDATORS = { 'str': check_type_str, 'list': check_type_list, 'dict': check_type_dict, 'bool': check_type_bool, 'int': check_type_int, 'float': check_type_float, 'path': check_type_path, 'raw': check_type_raw, 'jsonarg': check_type_jsonarg, 'json': check_type_jsonarg, 'bytes': check_type_bytes, 'bits': check_type_bits, } def _get_type_validator(wanted): """Returns the callable used to validate a wanted type and the type name. :arg wanted: String or callable. If a string, get the corresponding validation function from DEFAULT_TYPE_VALIDATORS. 
If callable, get the name of the custom callable and return that for the type_checker. :returns: Tuple of callable function or None, and a string that is the name of the wanted type. """ # Use one of our builtin validators. if not callable(wanted): if wanted is None: # Default type for parameters wanted = 'str' type_checker = DEFAULT_TYPE_VALIDATORS.get(wanted) # Use the custom callable for validation. else: type_checker = wanted wanted = getattr(wanted, '__name__', to_native(type(wanted))) return type_checker, wanted def _get_legal_inputs(argument_spec, parameters, aliases=None): if aliases is None: aliases = _handle_aliases(argument_spec, parameters) return list(aliases.keys()) + list(argument_spec.keys()) def _get_unsupported_parameters(argument_spec, parameters, legal_inputs=None, options_context=None, store_supported=None): """Check keys in parameters against those provided in legal_inputs to ensure they contain legal values. If legal_inputs are not supplied, they will be generated using the argument_spec. :arg argument_spec: Dictionary of parameters, their type, and valid values. :arg parameters: Dictionary of parameters. :arg legal_inputs: List of valid key names property names. Overrides values in argument_spec. :arg options_context: List of parent keys for tracking the context of where a parameter is defined. :returns: Set of unsupported parameters. Empty set if no unsupported parameters are found. """ if legal_inputs is None: legal_inputs = _get_legal_inputs(argument_spec, parameters) unsupported_parameters = set() for k in parameters.keys(): if k not in legal_inputs: context = k if options_context: context = tuple(options_context + [k]) unsupported_parameters.add(context) if store_supported is not None: supported_aliases = _handle_aliases(argument_spec, parameters) supported_params = [] for option in legal_inputs: if option in supported_aliases: continue supported_params.append(option) store_supported.update({context: (supported_params, supported_aliases)}) return unsupported_parameters def _handle_aliases(argument_spec, parameters, alias_warnings=None, alias_deprecations=None): """Process aliases from an argument_spec including warnings and deprecations. Modify ``parameters`` by adding a new key for each alias with the supplied value from ``parameters``. If a list is provided to the alias_warnings parameter, it will be filled with tuples (option, alias) in every case where both an option and its alias are specified. If a list is provided to alias_deprecations, it will be populated with dictionaries, each containing deprecation information for each alias found in argument_spec. :param argument_spec: Dictionary of parameters, their type, and valid values. :type argument_spec: dict :param parameters: Dictionary of parameters. 
:type parameters: dict :param alias_warnings: :type alias_warnings: list :param alias_deprecations: :type alias_deprecations: list """ aliases_results = {} # alias:canon for (k, v) in argument_spec.items(): aliases = v.get('aliases', None) default = v.get('default', None) required = v.get('required', False) if alias_deprecations is not None: for alias in argument_spec[k].get('deprecated_aliases', []): if alias.get('name') in parameters: alias_deprecations.append(alias) if default is not None and required: # not alias specific but this is a good place to check this raise ValueError("internal error: required and default are mutually exclusive for %s" % k) if aliases is None: continue if not is_iterable(aliases) or isinstance(aliases, (binary_type, text_type)): raise TypeError('internal error: aliases must be a list or tuple') for alias in aliases: aliases_results[alias] = k if alias in parameters: if k in parameters and alias_warnings is not None: alias_warnings.append((k, alias)) parameters[k] = parameters[alias] return aliases_results def _list_deprecations(argument_spec, parameters, prefix=''): """Return a list of deprecations :arg argument_spec: An argument spec dictionary :arg parameters: Dictionary of parameters :returns: List of dictionaries containing a message and version in which the deprecated parameter will be removed, or an empty list. :Example return: .. code-block:: python [ { 'msg': "Param 'deptest' is deprecated. See the module docs for more information", 'version': '2.9' } ] """ deprecations = [] for arg_name, arg_opts in argument_spec.items(): if arg_name in parameters: if prefix: sub_prefix = '%s["%s"]' % (prefix, arg_name) else: sub_prefix = arg_name if arg_opts.get('removed_at_date') is not None: deprecations.append({ 'msg': "Param '%s' is deprecated. See the module docs for more information" % sub_prefix, 'date': arg_opts.get('removed_at_date'), 'collection_name': arg_opts.get('removed_from_collection'), }) elif arg_opts.get('removed_in_version') is not None: deprecations.append({ 'msg': "Param '%s' is deprecated. 
See the module docs for more information" % sub_prefix, 'version': arg_opts.get('removed_in_version'), 'collection_name': arg_opts.get('removed_from_collection'), }) # Check sub-argument spec sub_argument_spec = arg_opts.get('options') if sub_argument_spec is not None: sub_arguments = parameters[arg_name] if isinstance(sub_arguments, Mapping): sub_arguments = [sub_arguments] if isinstance(sub_arguments, list): for sub_params in sub_arguments: if isinstance(sub_params, Mapping): deprecations.extend(_list_deprecations(sub_argument_spec, sub_params, prefix=sub_prefix)) return deprecations def _list_no_log_values(argument_spec, params): """Return set of no log values :arg argument_spec: An argument spec dictionary :arg params: Dictionary of all parameters :returns: :class:`set` of strings that should be hidden from output: """ no_log_values = set() for arg_name, arg_opts in argument_spec.items(): if arg_opts.get('no_log', False): # Find the value for the no_log'd param no_log_object = params.get(arg_name, None) if no_log_object: try: no_log_values.update(_return_datastructure_name(no_log_object)) except TypeError as e: raise TypeError('Failed to convert "%s": %s' % (arg_name, to_native(e))) # Get no_log values from suboptions sub_argument_spec = arg_opts.get('options') if sub_argument_spec is not None: wanted_type = arg_opts.get('type') sub_parameters = params.get(arg_name) if sub_parameters is not None: if wanted_type == 'dict' or (wanted_type == 'list' and arg_opts.get('elements', '') == 'dict'): # Sub parameters can be a dict or list of dicts. Ensure parameters are always a list. if not isinstance(sub_parameters, list): sub_parameters = [sub_parameters] for sub_param in sub_parameters: # Validate dict fields in case they came in as strings if isinstance(sub_param, string_types): sub_param = check_type_dict(sub_param) if not isinstance(sub_param, Mapping): raise TypeError("Value '{1}' in the sub parameter field '{0}' must be a {2}, " "not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type)) no_log_values.update(_list_no_log_values(sub_argument_spec, sub_param)) return no_log_values def _return_datastructure_name(obj): """ Return native stringified values from datastructures. For use with removing sensitive values pre-jsonification.""" if isinstance(obj, (text_type, binary_type)): if obj: yield to_native(obj, errors='surrogate_or_strict') return elif isinstance(obj, Mapping): for element in obj.items(): yield from _return_datastructure_name(element[1]) elif is_iterable(obj): for element in obj: yield from _return_datastructure_name(element) elif obj is None or isinstance(obj, bool): # This must come before int because bools are also ints return elif isinstance(obj, tuple(list(integer_types) + [float])): yield to_native(obj, nonstring='simplerepr') else: raise TypeError('Unknown parameter type: %s' % (type(obj))) def _remove_values_conditions(value, no_log_strings, deferred_removals): """ Helper function for :meth:`remove_values`. :arg value: The value to check for strings that need to be stripped :arg no_log_strings: set of strings which must be stripped out of any values :arg deferred_removals: List which holds information about nested containers that have to be iterated for removals. It is passed into this function so that more entries can be added to it if value is a container type. The format of each entry is a 2-tuple where the first element is the ``value`` parameter and the second value is a new container to copy the elements of ``value`` into once iterated. 
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions: 1. :class:`~datetime.datetime` objects which are changed into a string representation. 2. objects which are in ``no_log_strings`` are replaced with a placeholder so that no sensitive data is leaked. If ``value`` is a container type, returns a new empty container. ``deferred_removals`` is added to as a side-effect of this function. .. warning:: It is up to the caller to make sure the order in which value is passed in is correct. For instance, higher level containers need to be passed in before lower level containers. For example, given ``{'level1': {'level2': 'level3': [True]} }`` first pass in the dictionary for ``level1``, then the dict for ``level2``, and finally the list for ``level3``. """ if isinstance(value, (text_type, binary_type)): # Need native str type native_str_value = value if isinstance(value, text_type): value_is_text = True if PY2: native_str_value = to_bytes(value, errors='surrogate_or_strict') elif isinstance(value, binary_type): value_is_text = False if PY3: native_str_value = to_text(value, errors='surrogate_or_strict') if native_str_value in no_log_strings: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' for omit_me in no_log_strings: native_str_value = native_str_value.replace(omit_me, '*' * 8) if value_is_text and isinstance(native_str_value, binary_type): value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace') elif not value_is_text and isinstance(native_str_value, text_type): value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace') else: value = native_str_value elif isinstance(value, Sequence): if isinstance(value, MutableSequence): new_value = type(value)() else: new_value = [] # Need a mutable value deferred_removals.append((value, new_value)) value = new_value elif isinstance(value, Set): if isinstance(value, MutableSet): new_value = type(value)() else: new_value = set() # Need a mutable value deferred_removals.append((value, new_value)) value = new_value elif isinstance(value, Mapping): if isinstance(value, MutableMapping): new_value = type(value)() else: new_value = {} # Need a mutable value deferred_removals.append((value, new_value)) value = new_value elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))): stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict') if stringy_value in no_log_strings: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' for omit_me in no_log_strings: if omit_me in stringy_value: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' elif isinstance(value, (datetime.datetime, datetime.date)): value = value.isoformat() else: raise TypeError('Value of unknown type: %s, %s' % (type(value), value)) return value def _set_defaults(argument_spec, parameters, set_default=True): """Set default values for parameters when no value is supplied. Modifies parameters directly. :arg argument_spec: Argument spec :type argument_spec: dict :arg parameters: Parameters to evaluate :type parameters: dict :kwarg set_default: Whether or not to set the default values :type set_default: bool :returns: Set of strings that should not be logged. :rtype: set """ no_log_values = set() for param, value in argument_spec.items(): # TODO: Change the default value from None to Sentinel to differentiate between # user supplied None and a default value set by this function. 
default = value.get('default', None) # This prevents setting defaults on required items on the 1st run, # otherwise will set things without a default to None on the 2nd. if param not in parameters and (default is not None or set_default): # Make sure any default value for no_log fields are masked. if value.get('no_log', False) and default: no_log_values.add(default) parameters[param] = default return no_log_values def _sanitize_keys_conditions(value, no_log_strings, ignore_keys, deferred_removals): """ Helper method to :func:`sanitize_keys` to build ``deferred_removals`` and avoid deep recursion. """ if isinstance(value, (text_type, binary_type)): return value if isinstance(value, Sequence): if isinstance(value, MutableSequence): new_value = type(value)() else: new_value = [] # Need a mutable value deferred_removals.append((value, new_value)) return new_value if isinstance(value, Set): if isinstance(value, MutableSet): new_value = type(value)() else: new_value = set() # Need a mutable value deferred_removals.append((value, new_value)) return new_value if isinstance(value, Mapping): if isinstance(value, MutableMapping): new_value = type(value)() else: new_value = {} # Need a mutable value deferred_removals.append((value, new_value)) return new_value if isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))): return value if isinstance(value, (datetime.datetime, datetime.date)): return value raise TypeError('Value of unknown type: %s, %s' % (type(value), value)) def _validate_elements(wanted_type, parameter, values, options_context=None, errors=None): if errors is None: errors = AnsibleValidationErrorMultiple() type_checker, wanted_element_type = _get_type_validator(wanted_type) validated_parameters = [] # Get param name for strings so we can later display this value in a useful error message if needed # Only pass 'kwargs' to our checkers and ignore custom callable checkers kwargs = {} if wanted_element_type == 'str' and isinstance(wanted_type, string_types): if isinstance(parameter, string_types): kwargs['param'] = parameter elif isinstance(parameter, dict): kwargs['param'] = list(parameter.keys())[0] for value in values: try: validated_parameters.append(type_checker(value, **kwargs)) except (TypeError, ValueError) as e: msg = "Elements value for option '%s'" % parameter if options_context: msg += " found in '%s'" % " -> ".join(options_context) msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_element_type, to_native(e)) errors.append(ElementError(msg)) return validated_parameters def _validate_argument_types(argument_spec, parameters, prefix='', options_context=None, errors=None): """Validate that parameter types match the type in the argument spec. Determine the appropriate type checker function and run each parameter value through that function. All error messages from type checker functions are returned. If any parameter fails to validate, it will not be in the returned parameters. :arg argument_spec: Argument spec :type argument_spec: dict :arg parameters: Parameters :type parameters: dict :kwarg prefix: Name of the parent key that contains the spec. Used in the error message :type prefix: str :kwarg options_context: List of contexts? :type options_context: list :returns: Two item tuple containing validated and coerced parameters and a list of any errors that were encountered. 
:rtype: tuple """ if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): if param not in parameters: continue value = parameters[param] if value is None and not spec.get('required') and spec.get('default') is None: continue wanted_type = spec.get('type') type_checker, wanted_name = _get_type_validator(wanted_type) # Get param name for strings so we can later display this value in a useful error message if needed # Only pass 'kwargs' to our checkers and ignore custom callable checkers kwargs = {} if wanted_name == 'str' and isinstance(wanted_type, string_types): kwargs['param'] = list(parameters.keys())[0] # Get the name of the parent key if this is a nested option if prefix: kwargs['prefix'] = prefix try: parameters[param] = type_checker(value, **kwargs) elements_wanted_type = spec.get('elements', None) if elements_wanted_type: elements = parameters[param] if wanted_type != 'list' or not isinstance(elements, list): msg = "Invalid type %s for option '%s'" % (wanted_name, elements) if options_context: msg += " found in '%s'." % " -> ".join(options_context) msg += ", elements value check is supported only with 'list' type" errors.append(ArgumentTypeError(msg)) parameters[param] = _validate_elements(elements_wanted_type, param, elements, options_context, errors) except (TypeError, ValueError) as e: msg = "argument '%s' is of type %s" % (param, type(value)) if options_context: msg += " found in '%s'." % " -> ".join(options_context) msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e)) errors.append(ArgumentTypeError(msg)) def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None): """Ensure all arguments have the requested values, and there are no stray arguments""" if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): choices = spec.get('choices') if choices is None: continue if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)): if param in parameters: # Allow one or more when type='list' param with choices if isinstance(parameters[param], list): diff_list = [item for item in parameters[param] if item not in choices] if diff_list: choices_str = ", ".join([to_native(c) for c in choices]) diff_str = ", ".join([to_native(c) for c in diff_list]) msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_str) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) elif parameters[param] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. 
if parameters[param] == 'False': overlap = BOOLEANS_FALSE.intersection(choices) if len(overlap) == 1: # Extract from a set (parameters[param],) = overlap if parameters[param] == 'True': overlap = BOOLEANS_TRUE.intersection(choices) if len(overlap) == 1: (parameters[param],) = overlap if parameters[param] not in choices: choices_str = ", ".join([to_native(c) for c in choices]) msg = "value of %s must be one of: %s, got: %s" % (param, choices_str, parameters[param]) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) else: msg = "internal error: choices for argument %s are not iterable: %s" % (param, choices) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentTypeError(msg)) def _validate_sub_spec( argument_spec, parameters, prefix="", options_context=None, errors=None, no_log_values=None, unsupported_parameters=None, supported_parameters=None, alias_deprecations=None, ): """Validate sub argument spec. This function is recursive. """ if options_context is None: options_context = [] if errors is None: errors = AnsibleValidationErrorMultiple() if no_log_values is None: no_log_values = set() if unsupported_parameters is None: unsupported_parameters = set() if supported_parameters is None: supported_parameters = dict() for param, value in argument_spec.items(): wanted = value.get('type') if wanted == 'dict' or (wanted == 'list' and value.get('elements', '') == 'dict'): sub_spec = value.get('options') if value.get('apply_defaults', False): if sub_spec is not None: if parameters.get(param) is None: parameters[param] = {} else: continue elif sub_spec is None or param not in parameters or parameters[param] is None: continue # Keep track of context for warning messages options_context.append(param) # Make sure we can iterate over the elements if not isinstance(parameters[param], Sequence) or isinstance(parameters[param], string_types): elements = [parameters[param]] else: elements = parameters[param] for idx, sub_parameters in enumerate(elements): no_log_values.update(set_fallbacks(sub_spec, sub_parameters)) if not isinstance(sub_parameters, dict): errors.append(SubParameterTypeError("value of '%s' must be of type dict or list of dicts" % param)) continue # Set prefix for warning messages new_prefix = prefix + param if wanted == 'list': new_prefix += '[%d]' % idx new_prefix += '.' alias_warnings = [] alias_deprecations_sub = [] try: options_aliases = _handle_aliases(sub_spec, sub_parameters, alias_warnings, alias_deprecations_sub) except (TypeError, ValueError) as e: options_aliases = {} errors.append(AliasError(to_native(e))) for option, alias in alias_warnings: warn('Both option %s%s and its alias %s%s are set.' 
% (new_prefix, option, new_prefix, alias)) if alias_deprecations is not None: for deprecation in alias_deprecations_sub: alias_deprecations.append({ 'name': '%s%s' % (new_prefix, deprecation['name']), 'version': deprecation.get('version'), 'date': deprecation.get('date'), 'collection_name': deprecation.get('collection_name'), }) try: no_log_values.update(_list_no_log_values(sub_spec, sub_parameters)) except TypeError as te: errors.append(NoLogError(to_native(te))) legal_inputs = _get_legal_inputs(sub_spec, sub_parameters, options_aliases) unsupported_parameters.update( _get_unsupported_parameters( sub_spec, sub_parameters, legal_inputs, options_context, store_supported=supported_parameters, ) ) try: check_mutually_exclusive(value.get('mutually_exclusive'), sub_parameters, options_context) except TypeError as e: errors.append(MutuallyExclusiveError(to_native(e))) no_log_values.update(_set_defaults(sub_spec, sub_parameters, False)) try: check_required_arguments(sub_spec, sub_parameters, options_context) except TypeError as e: errors.append(RequiredError(to_native(e))) _validate_argument_types(sub_spec, sub_parameters, new_prefix, options_context, errors=errors) _validate_argument_values(sub_spec, sub_parameters, options_context, errors=errors) for check in _ADDITIONAL_CHECKS: try: check['func'](value.get(check['attr']), sub_parameters, options_context) except TypeError as e: errors.append(check['err'](to_native(e))) no_log_values.update(_set_defaults(sub_spec, sub_parameters)) # Handle nested specs _validate_sub_spec( sub_spec, sub_parameters, new_prefix, options_context, errors, no_log_values, unsupported_parameters, supported_parameters, alias_deprecations) options_context.pop() def env_fallback(*args, **kwargs): """Load value from environment variable""" for arg in args: if arg in os.environ: return os.environ[arg] raise AnsibleFallbackNotFound def set_fallbacks(argument_spec, parameters): no_log_values = set() for param, value in argument_spec.items(): fallback = value.get('fallback', (None,)) fallback_strategy = fallback[0] fallback_args = [] fallback_kwargs = {} if param not in parameters and fallback_strategy is not None: for item in fallback[1:]: if isinstance(item, dict): fallback_kwargs = item else: fallback_args = item try: fallback_value = fallback_strategy(*fallback_args, **fallback_kwargs) except AnsibleFallbackNotFound: continue else: if value.get('no_log', False) and fallback_value: no_log_values.add(fallback_value) parameters[param] = fallback_value return no_log_values def sanitize_keys(obj, no_log_strings, ignore_keys=frozenset()): """Sanitize the keys in a container object by removing ``no_log`` values from key names. This is a companion function to the :func:`remove_values` function. Similar to that function, we make use of ``deferred_removals`` to avoid hitting maximum recursion depth in cases of large data structures. :arg obj: The container object to sanitize. Non-container objects are returned unmodified. :arg no_log_strings: A set of string values we do not want logged. :kwarg ignore_keys: A set of string values of keys to not sanitize. :returns: An object with sanitized keys. 
""" deferred_removals = deque() no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings] new_value = _sanitize_keys_conditions(obj, no_log_strings, ignore_keys, deferred_removals) while deferred_removals: old_data, new_data = deferred_removals.popleft() if isinstance(new_data, Mapping): for old_key, old_elem in old_data.items(): if old_key in ignore_keys or old_key.startswith('_ansible'): new_data[old_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals) else: # Sanitize the old key. We take advantage of the sanitizing code in # _remove_values_conditions() rather than recreating it here. new_key = _remove_values_conditions(old_key, no_log_strings, None) new_data[new_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals) else: for elem in old_data: new_elem = _sanitize_keys_conditions(elem, no_log_strings, ignore_keys, deferred_removals) if isinstance(new_data, MutableSequence): new_data.append(new_elem) elif isinstance(new_data, MutableSet): new_data.add(new_elem) else: raise TypeError('Unknown container type encountered when removing private values from keys') return new_value def remove_values(value, no_log_strings): """Remove strings in ``no_log_strings`` from value. If value is a container type, then remove a lot more. Use of ``deferred_removals`` exists, rather than a pure recursive solution, because of the potential to hit the maximum recursion depth when dealing with large amounts of data (see `issue #24560 <https://github.com/ansible/ansible/issues/24560>`_). """ deferred_removals = deque() no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings] new_value = _remove_values_conditions(value, no_log_strings, deferred_removals) while deferred_removals: old_data, new_data = deferred_removals.popleft() if isinstance(new_data, Mapping): for old_key, old_elem in old_data.items(): new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals) new_data[old_key] = new_elem else: for elem in old_data: new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals) if isinstance(new_data, MutableSequence): new_data.append(new_elem) elif isinstance(new_data, MutableSet): new_data.add(new_elem) else: raise TypeError('Unknown container type encountered when removing private values from output') return new_value
37,303
Python
.py
750
38.910667
126
0.619074
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
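A minimal usage sketch for the two public helpers at the end of this file, remove_values() and sanitize_keys(). The import path matches the file's location above; the exact masking placeholder text is an implementation detail of the private _remove_values_conditions() helper (not shown in this excerpt), so treat the commented behavior as illustrative rather than exact.

# Sketch: masking no_log values in a nested result, and masking keys.
from ansible.module_utils.common.parameters import remove_values, sanitize_keys

result = {'user': 'admin', 'token': 's3cret', 'log': ['ok', 'saw s3cret here']}

# 'token' is an exact match and is replaced with a placeholder; inside the
# log string the secret substring is masked in place, however deeply nested.
print(remove_values(result, {'s3cret'}))

# sanitize_keys() applies the same masking to dictionary *keys* instead,
# skipping anything in ignore_keys or starting with '_ansible'.
print(sanitize_keys({'s3cret': 'value'}, {'s3cret'}))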
14,222
sys_info.py
ansible_ansible/lib/ansible/module_utils/common/sys_info.py
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

import platform

from ansible.module_utils import distro
from ansible.module_utils.common._utils import get_all_subclasses

__all__ = ('get_distribution', 'get_distribution_version', 'get_platform_subclass')


def get_distribution():
    """
    Return the name of the distribution the module is running on.

    :rtype: NativeString or None
    :returns: Name of the distribution the module is running on

    This function attempts to determine what distribution the code is running
    on and return a string representing that value. If the platform is Linux
    and the distribution cannot be determined, it returns ``OtherLinux``.
    """
    distribution = distro.id().capitalize()

    if platform.system() == 'Linux':
        if distribution == 'Amzn':
            distribution = 'Amazon'
        elif distribution == 'Rhel':
            distribution = 'Redhat'
        elif not distribution:
            distribution = 'OtherLinux'

    return distribution


def get_distribution_version():
    """
    Get the version of the distribution the code is running on

    :rtype: NativeString or None
    :returns: A string representation of the version of the distribution. If it
        cannot determine the version, it returns an empty string. If this is not run
        on a Linux machine it returns None.
    """
    version = None

    needs_best_version = frozenset((
        u'centos',
        u'debian',
    ))

    version = distro.version()
    distro_id = distro.id()

    if version is not None:
        if distro_id in needs_best_version:
            version_best = distro.version(best=True)

            # CentOS maintainers believe only the major version is appropriate,
            # but Ansible users desire minor version information, e.g., 7.5.
            # https://github.com/ansible/ansible/issues/50141#issuecomment-449452781
            if distro_id == u'centos':
                version = u'.'.join(version_best.split(u'.')[:2])

            # Debian does not include minor version in /etc/os-release.
            # Bug report filed upstream requesting this be added to /etc/os-release
            # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=931197
            if distro_id == u'debian':
                version = version_best

    else:
        version = u''

    return version


def get_distribution_codename():
    """
    Return the code name for this Linux Distribution

    :rtype: NativeString or None
    :returns: A string representation of the distribution's codename or None if not a Linux distro
    """
    codename = None
    if platform.system() == 'Linux':
        # Until this gets merged and we update our bundled copy of distro:
        # https://github.com/nir0s/distro/pull/230
        # Fixes Fedora 28+ not having a code name and Ubuntu Xenial Xerus needing to be "xenial"
        os_release_info = distro.os_release_info()
        codename = os_release_info.get('version_codename')

        if codename is None:
            codename = os_release_info.get('ubuntu_codename')

        if codename is None and distro.id() == 'ubuntu':
            lsb_release_info = distro.lsb_release_info()
            codename = lsb_release_info.get('codename')

        if codename is None:
            codename = distro.codename()
            if codename == u'':
                codename = None

    return codename


def get_platform_subclass(cls):
    """
    Finds a subclass implementing desired functionality on the platform the code is running on

    :arg cls: Class to find an appropriate subclass for
    :returns: A class that implements the functionality on this platform

    Some Ansible modules have different implementations depending on the platform they
    run on. This function is used to select between the various implementations and
    choose one.
    You can look at the implementation of the Ansible :ref:`User module<user_module>`
    for an example of how to use this.

    This function replaces ``basic.load_platform_subclass()``. When you port code, you
    need to change the callers to be explicit about instantiating the class. For
    instance, code in the Ansible User module changed from:

    .. code-block:: python

       # Old
       class User:
           def __new__(cls, args, kwargs):
               return load_platform_subclass(User, args, kwargs)

       # New
       class User:
           def __new__(cls, *args, **kwargs):
               new_cls = get_platform_subclass(User)
               return super(cls, new_cls).__new__(new_cls)
    """
    this_platform = platform.system()
    distribution = get_distribution()

    subclass = None

    # get the most specific subclass for this platform
    if distribution is not None:
        for sc in get_all_subclasses(cls):
            if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
                subclass = sc

    if subclass is None:
        for sc in get_all_subclasses(cls):
            if sc.platform == this_platform and sc.distribution is None:
                subclass = sc

    if subclass is None:
        subclass = cls

    return subclass
5,436
Python
.py
117
38.529915
112
0.668561
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
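A sketch of the pattern get_platform_subclass() exists for, mirroring the docstring's User example. The Service/LinuxService names are illustrative, not a real ansible-core module.

from ansible.module_utils.common.sys_info import get_platform_subclass

class Service:
    platform = 'Generic'
    distribution = None

    def __new__(cls, *args, **kwargs):
        # Delegate instantiation to the most specific platform match.
        new_cls = get_platform_subclass(Service)
        return super(cls, new_cls).__new__(new_cls)

class LinuxService(Service):
    platform = 'Linux'
    distribution = None

svc = Service()  # on a Linux host this yields a LinuxService instance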
14,223
dict_transformations.py
ansible_ansible/lib/ansible/module_utils/common/dict_transformations.py
# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations import re from copy import deepcopy from ansible.module_utils.six.moves.collections_abc import MutableMapping def camel_dict_to_snake_dict(camel_dict, reversible=False, ignore_list=()): """ reversible allows two way conversion of a camelized dict such that snake_dict_to_camel_dict(camel_dict_to_snake_dict(x)) == x This is achieved through mapping e.g. HTTPEndpoint to h_t_t_p_endpoint where the default would be simply http_endpoint, which gets turned into HttpEndpoint if recamelized. ignore_list is used to avoid converting a sub-tree of a dict. This is particularly important for tags, where keys are case-sensitive. We convert the 'Tags' key but nothing below. """ def value_is_list(camel_list): checked_list = [] for item in camel_list: if isinstance(item, dict): checked_list.append(camel_dict_to_snake_dict(item, reversible)) elif isinstance(item, list): checked_list.append(value_is_list(item)) else: checked_list.append(item) return checked_list snake_dict = {} for k, v in camel_dict.items(): if isinstance(v, dict) and k not in ignore_list: snake_dict[_camel_to_snake(k, reversible=reversible)] = camel_dict_to_snake_dict(v, reversible) elif isinstance(v, list) and k not in ignore_list: snake_dict[_camel_to_snake(k, reversible=reversible)] = value_is_list(v) else: snake_dict[_camel_to_snake(k, reversible=reversible)] = v return snake_dict def snake_dict_to_camel_dict(snake_dict, capitalize_first=False): """ Perhaps unexpectedly, snake_dict_to_camel_dict returns dromedaryCase rather than true CamelCase. Passing capitalize_first=True returns CamelCase. The default remains False as that was the original implementation """ def camelize(complex_type, capitalize_first=False): if complex_type is None: return new_type = type(complex_type)() if isinstance(complex_type, dict): for key in complex_type: new_type[_snake_to_camel(key, capitalize_first)] = camelize(complex_type[key], capitalize_first) elif isinstance(complex_type, list): for i in range(len(complex_type)): new_type.append(camelize(complex_type[i], capitalize_first)) else: return complex_type return new_type return camelize(snake_dict, capitalize_first) def _snake_to_camel(snake, capitalize_first=False): if capitalize_first: return ''.join(x.capitalize() or '_' for x in snake.split('_')) else: return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:]) def _camel_to_snake(name, reversible=False): def prepend_underscore_and_lower(m): return '_' + m.group(0).lower() if reversible: upper_pattern = r'[A-Z]' else: # Cope with pluralized abbreviations such as TargetGroupARNs # that would otherwise be rendered target_group_ar_ns upper_pattern = r'[A-Z]{3,}s$' s1 = re.sub(upper_pattern, prepend_underscore_and_lower, name) # Handle when there was nothing before the plural_pattern if s1.startswith("_") and not name.startswith("_"): s1 = s1[1:] if reversible: return s1 # Remainder of solution seems to be https://stackoverflow.com/a/1176023 first_cap_pattern = r'(.)([A-Z][a-z]+)' all_cap_pattern = r'([a-z0-9])([A-Z]+)' s2 = re.sub(first_cap_pattern, r'\1_\2', s1) return re.sub(all_cap_pattern, r'\1_\2', s2).lower() def dict_merge(a, b): """recursively merges dicts. 
    Rather than a simple ``a['key'] = b['key']`` overwrite: if both a and b have a key
    whose value is a dict, then dict_merge is called on both values and the result is
    stored in the returned dictionary."""
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    for k, v in b.items():
        if k in result and isinstance(result[k], dict):
            result[k] = dict_merge(result[k], v)
        else:
            result[k] = deepcopy(v)
    return result


def recursive_diff(dict1, dict2):
    """Recursively diff two dictionaries

    Raises ``TypeError`` for incorrect argument type.

    :arg dict1: Dictionary to compare against.
    :arg dict2: Dictionary to compare with ``dict1``.
    :return: Tuple of dictionaries of differences or ``None`` if there are no differences.
    """
    if not all((isinstance(item, MutableMapping) for item in (dict1, dict2))):
        raise TypeError("Unable to diff 'dict1' %s and 'dict2' %s. "
                        "Both must be a dictionary." % (type(dict1), type(dict2)))

    left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
    right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
    for k in (set(dict1.keys()) & set(dict2.keys())):
        if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
            result = recursive_diff(dict1[k], dict2[k])
            if result:
                left[k] = result[0]
                right[k] = result[1]
        elif dict1[k] != dict2[k]:
            left[k] = dict1[k]
            right[k] = dict2[k]
    if left or right:
        return left, right
    return None
5,525
Python
.py
119
38.689076
112
0.642964
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
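An illustrative round trip through the converters above. Note the default (non-reversible) conversion is lossy for runs of capitals, which is exactly the HTTPEndpoint case the docstring describes.

from ansible.module_utils.common.dict_transformations import (
    camel_dict_to_snake_dict,
    snake_dict_to_camel_dict,
)

camel = {'HTTPEndpoint': {'DNSName': 'example.com'}, 'Tags': {'Name': 'web'}}
snake = camel_dict_to_snake_dict(camel, ignore_list=('Tags',))
# -> {'http_endpoint': {'dns_name': 'example.com'}, 'tags': {'Name': 'web'}}
# ignore_list converted the 'Tags' key itself but left its case-sensitive
# contents untouched.

# Recamelizing the lossy form yields HttpEndpoint, not HTTPEndpoint:
print(snake_dict_to_camel_dict(snake, capitalize_first=True))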
14,224
file.py
ansible_ansible/lib/ansible/module_utils/common/file.py
# Copyright (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import annotations import os import stat import re FILE_ATTRIBUTES = { 'A': 'noatime', 'a': 'append', 'c': 'compressed', 'C': 'nocow', 'd': 'nodump', 'D': 'dirsync', 'e': 'extents', 'E': 'encrypted', 'h': 'blocksize', 'i': 'immutable', 'I': 'indexed', 'j': 'journalled', 'N': 'inline', 's': 'zero', 'S': 'synchronous', 't': 'notail', 'T': 'blockroot', 'u': 'undelete', 'X': 'compressedraw', 'Z': 'compresseddirty', } # Used for parsing symbolic file perms MODE_OPERATOR_RE = re.compile(r'[+=-]') USERS_RE = re.compile(r'[^ugo]') PERMS_RE = re.compile(r'[^rwxXstugo]') S_IRANY = 0o0444 # read by user, group, others S_IWANY = 0o0222 # write by user, group, others S_IXANY = 0o0111 # execute by user, group, others S_IRWU_RWG_RWO = S_IRANY | S_IWANY # read, write by user, group, others S_IRWU_RG_RO = S_IRANY | stat.S_IWUSR # read by user, group, others and write only by user S_IRWXU_RXG_RXO = S_IRANY | S_IXANY | stat.S_IWUSR # read, execute by user, group, others and write only by user _PERM_BITS = 0o7777 # file mode permission bits _EXEC_PERM_BITS = S_IXANY # execute permission bits _DEFAULT_PERM = S_IRWU_RWG_RWO # default file permission bits def is_executable(path): # This function's signature needs to be repeated # as the first line of its docstring. # This method is reused by the basic module, # the repetition helps the basic module's html documentation come out right. # http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_docstring_signature """is_executable(path) is the given path executable? :arg path: The path of the file to check. Limitations: * Does not account for FSACLs. * Most times we really want to know "Can the current user execute this file". This function does not tell us that, only if any execute bit is set. """ # These are all bitfields so first bitwise-or all the permissions we're # looking for, then bitwise-and with the file's mode to determine if any # execute bits are set. return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE]) def format_attributes(attributes): attribute_list = [FILE_ATTRIBUTES.get(attr) for attr in attributes if attr in FILE_ATTRIBUTES] return attribute_list def get_flags_from_attributes(attributes): flags = [key for key, attr in FILE_ATTRIBUTES.items() if attr in attributes] return ''.join(flags) def get_file_arg_spec(): arg_spec = dict( mode=dict(type='raw'), owner=dict(), group=dict(), seuser=dict(), serole=dict(), selevel=dict(), setype=dict(), attributes=dict(aliases=['attr']), ) return arg_spec
2,982
Python
.py
77
34.324675
113
0.666205
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
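Quick checks against the helpers above. As the docstring warns, is_executable() only inspects mode bits, not ACLs or whether the current user can actually execute the file; /bin/sh is assumed to exist here for illustration.

from ansible.module_utils.common.file import (
    format_attributes,
    get_flags_from_attributes,
    is_executable,
)

print(bool(is_executable('/bin/sh')))            # True on typical POSIX systems
print(format_attributes('ai'))                   # ['append', 'immutable']
print(get_flags_from_attributes(['immutable']))  # 'i'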
14,225
collections.py
ansible_ansible/lib/ansible/module_utils/common/collections.py
# Copyright: (c) 2018, Sviatoslav Sydorenko <ssydoren@redhat.com>
# Copyright: (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
"""Collection of low-level utility functions."""

from __future__ import annotations

from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves.collections_abc import Hashable, Mapping, MutableMapping, Sequence  # pylint: disable=unused-import


class ImmutableDict(Hashable, Mapping):
    """Dictionary that cannot be updated"""
    def __init__(self, *args, **kwargs):
        self._store = dict(*args, **kwargs)

    def __getitem__(self, key):
        return self._store[key]

    def __iter__(self):
        return self._store.__iter__()

    def __len__(self):
        return self._store.__len__()

    def __hash__(self):
        return hash(frozenset(self.items()))

    def __eq__(self, other):
        try:
            if self.__hash__() == hash(other):
                return True
        except TypeError:
            pass

        return False

    def __repr__(self):
        return 'ImmutableDict({0})'.format(repr(self._store))

    def union(self, overriding_mapping):
        """
        Create an ImmutableDict as a combination of the original and overriding_mapping

        :arg overriding_mapping: A Mapping of replacement and additional items
        :return: A copy of the ImmutableDict with key-value pairs from the overriding_mapping added

        If any of the keys in overriding_mapping are already present in the
        original ImmutableDict, the overriding_mapping item replaces the one
        in the original ImmutableDict.
        """
        return ImmutableDict(self._store, **overriding_mapping)

    def difference(self, subtractive_iterable):
        """
        Create an ImmutableDict as a combination of the original minus keys in subtractive_iterable

        :arg subtractive_iterable: Any iterable containing keys that should not
            be present in the new ImmutableDict
        :return: A copy of the ImmutableDict with keys from the subtractive_iterable removed
        """
        remove_keys = frozenset(subtractive_iterable)
        keys = (k for k in self._store.keys() if k not in remove_keys)
        return ImmutableDict((k, self._store[k]) for k in keys)


def is_string(seq):
    """Identify whether the input has a string-like type (including bytes)."""
    # AnsibleVaultEncryptedUnicode inherits from Sequence, but is expected to be a string like object
    return isinstance(seq, (text_type, binary_type)) or getattr(seq, '__ENCRYPTED__', False)


def is_iterable(seq, include_strings=False):
    """Identify whether the input is an iterable."""
    if not include_strings and is_string(seq):
        return False

    try:
        iter(seq)
        return True
    except TypeError:
        return False


def is_sequence(seq, include_strings=False):
    """Identify whether the input is a sequence.

    Strings and bytes are not sequences here,
    unless ``include_strings`` is ``True``.

    Non-indexable things are never of a sequence type.
    """
    if not include_strings and is_string(seq):
        return False

    return isinstance(seq, Sequence)


def count(seq):
    """Returns a dictionary with the number of appearances of each element of the iterable.

    Resembles the collections.Counter class functionality. It is meant to be used when the
    code is run on Python 2.6.* where collections.Counter is not available. It should be
    deprecated and replaced when support for Python < 2.7 is dropped.
    """
    if not is_iterable(seq):
        raise Exception('Argument provided is not an iterable')

    counters = dict()
    for elem in seq:
        counters[elem] = counters.get(elem, 0) + 1

    return counters
3,850
Python
.py
81
40.728395
135
0.688954
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
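A short usage sketch for the utilities above. ImmutableDict is hashable, so it can be used as a dict key itself; union() and difference() return new copies rather than mutating.

from ansible.module_utils.common.collections import ImmutableDict, count, is_sequence

base = ImmutableDict(become=False, check_mode=True)
merged = base.union({'check_mode': False})
print(merged['check_mode'])           # False: the override wins
print(base.difference(['become']))    # ImmutableDict({'check_mode': True})
print(is_sequence('abc'))             # False: strings are excluded by default
print(count(['a', 'b', 'a']))         # {'a': 2, 'b': 1}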
14,226
process.py
ansible_ansible/lib/ansible/module_utils/common/process.py
# Copyright (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

import os

from ansible.module_utils.common.file import is_executable
from ansible.module_utils.common.warnings import deprecate


def get_bin_path(arg, opt_dirs=None, required=None):
    """
    Find system executable in PATH. Raises ValueError if the executable is not found.

    :param arg: the executable to find
    :type arg: string
    :param opt_dirs: optional list of directories to search in addition to PATH
    :type opt_dirs: list of strings
    :param required: DEPRECATED. This parameter will be removed in 2.21
    :type required: boolean
    :returns: path to arg (should be abs path unless PATH or opt_dirs are relative paths)
    :raises: ValueError: if arg is not found

    In addition to PATH and opt_dirs, this function also looks through /sbin, /usr/sbin and /usr/local/sbin. A lot of
    modules, especially for gathering facts, depend on this behaviour.
    """
    if required is not None:
        deprecate(
            msg="The `required` parameter in `get_bin_path` API is deprecated.",
            version="2.21",
            collection_name="ansible.builtin",
        )

    paths = []
    sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
    opt_dirs = [] if opt_dirs is None else opt_dirs

    # Construct possible paths with precedence
    # passed in paths
    for d in opt_dirs:
        if d is not None and os.path.exists(d):
            paths.append(d)
    # system configured paths
    paths += os.environ.get('PATH', '').split(os.pathsep)

    # existing /sbin dirs, if not there already
    for p in sbin_paths:
        if p not in paths and os.path.exists(p):
            paths.append(p)

    # Search for binary
    bin_path = None
    for d in paths:
        if not d:
            continue
        path = os.path.join(d, arg)
        if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
            # first found wins
            bin_path = path
            break
    if bin_path is None:
        raise ValueError('Failed to find required executable "%s" in paths: %s' % (arg, os.pathsep.join(paths)))

    return bin_path
2,281
Python
.py
53
36.358491
117
0.669526
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
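A small caller sketch: get_bin_path() raises ValueError rather than returning None, so code that can tolerate a missing binary should catch the exception. The /opt/git/bin directory is purely illustrative.

from ansible.module_utils.common.process import get_bin_path

try:
    git = get_bin_path('git', opt_dirs=['/opt/git/bin'])
except ValueError:
    git = None  # absent from PATH, opt_dirs and the sbin fallbacks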
14,227
validation.py
ansible_ansible/lib/ansible/module_utils/common/validation.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

import decimal
import json
import os
import re

from ast import literal_eval

from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.text.converters import jsonify
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible.module_utils.common.warnings import deprecate
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import (
    binary_type,
    string_types,
    text_type,
)


def count_terms(terms, parameters):
    """Count the number of occurrences of a key in a given dictionary

    :arg terms: String or iterable of values to check
    :arg parameters: Dictionary of parameters

    :returns: An integer that is the number of occurrences of the terms values
        in the provided dictionary.
    """
    if not is_iterable(terms):
        terms = [terms]

    return len(set(terms).intersection(parameters))


def safe_eval(value, locals=None, include_exceptions=False):
    deprecate(
        "The safe_eval function should not be used.",
        version="2.21",
    )

    # do not allow method calls to modules
    if not isinstance(value, string_types):
        # already templated to a data structure, perhaps?
        if include_exceptions:
            return (value, None)
        return value
    if re.search(r'\w\.\w+\(', value):
        if include_exceptions:
            return (value, None)
        return value
    # do not allow imports
    if re.search(r'import \w+', value):
        if include_exceptions:
            return (value, None)
        return value
    try:
        result = literal_eval(value)
        if include_exceptions:
            return (result, None)
        else:
            return result
    except Exception as e:
        if include_exceptions:
            return (value, e)
        return value


def check_mutually_exclusive(terms, parameters, options_context=None):
    """Check mutually exclusive terms against argument parameters

    Accepts a single list or list of lists that are groups of terms that should be
    mutually exclusive with one another

    :arg terms: List of mutually exclusive parameters
    :arg parameters: Dictionary of parameters
    :kwarg options_context: List of strings of parent key names if ``terms`` are
        in a sub spec.

    :returns: Empty list or raises :class:`TypeError` if the check fails.
    """
    results = []
    if terms is None:
        return results

    for check in terms:
        count = count_terms(check, parameters)
        if count > 1:
            results.append(check)

    if results:
        full_list = ['|'.join(check) for check in results]
        msg = "parameters are mutually exclusive: %s" % ', '.join(full_list)
        if options_context:
            msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
        raise TypeError(to_native(msg))

    return results


def check_required_one_of(terms, parameters, options_context=None):
    """Check each list of terms to ensure at least one exists in the given module
    parameters

    Accepts a list of lists or tuples

    :arg terms: List of lists of terms to check. For each list of terms, at
        least one is required.
    :arg parameters: Dictionary of parameters
    :kwarg options_context: List of strings of parent key names if ``terms`` are
        in a sub spec.

    :returns: Empty list or raises :class:`TypeError` if the check fails.
""" results = [] if terms is None: return results for term in terms: count = count_terms(term, parameters) if count == 0: results.append(term) if results: for term in results: msg = "one of the following is required: %s" % ', '.join(term) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) raise TypeError(to_native(msg)) return results def check_required_together(terms, parameters, options_context=None): """Check each list of terms to ensure every parameter in each list exists in the given parameters. Accepts a list of lists or tuples. :arg terms: List of lists of terms to check. Each list should include parameters that are all required when at least one is specified in the parameters. :arg parameters: Dictionary of parameters :kwarg options_context: List of strings of parent key names if ``terms`` are in a sub spec. :returns: Empty list or raises :class:`TypeError` if the check fails. """ results = [] if terms is None: return results for term in terms: counts = [count_terms(field, parameters) for field in term] non_zero = [c for c in counts if c > 0] if len(non_zero) > 0: if 0 in counts: results.append(term) if results: for term in results: msg = "parameters are required together: %s" % ', '.join(term) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) raise TypeError(to_native(msg)) return results def check_required_by(requirements, parameters, options_context=None): """For each key in requirements, check the corresponding list to see if they exist in parameters. Accepts a single string or list of values for each key. :arg requirements: Dictionary of requirements :arg parameters: Dictionary of parameters :kwarg options_context: List of strings of parent key names if ``requirements`` are in a sub spec. :returns: Empty dictionary or raises :class:`TypeError` if the """ result = {} if requirements is None: return result for (key, value) in requirements.items(): if key not in parameters or parameters[key] is None: continue result[key] = [] # Support strings (single-item lists) if isinstance(value, string_types): value = [value] for required in value: if required not in parameters or parameters[required] is None: result[key].append(required) if result: for key, missing in result.items(): if len(missing) > 0: msg = "missing parameter(s) required by '%s': %s" % (key, ', '.join(missing)) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) raise TypeError(to_native(msg)) return result def check_required_arguments(argument_spec, parameters, options_context=None): """Check all parameters in argument_spec and return a list of parameters that are required but not present in parameters. Raises :class:`TypeError` if the check fails :arg argument_spec: Argument spec dictionary containing all parameters and their specification :arg parameters: Dictionary of parameters :kwarg options_context: List of strings of parent key names if ``argument_spec`` are in a sub spec. :returns: Empty list or raises :class:`TypeError` if the check fails. 
""" missing = [] if argument_spec is None: return missing for (k, v) in argument_spec.items(): required = v.get('required', False) if required and k not in parameters: missing.append(k) if missing: msg = "missing required arguments: %s" % ", ".join(sorted(missing)) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) raise TypeError(to_native(msg)) return missing def check_required_if(requirements, parameters, options_context=None): """Check parameters that are conditionally required Raises :class:`TypeError` if the check fails :arg requirements: List of lists specifying a parameter, value, parameters required when the given parameter is the specified value, and optionally a boolean indicating any or all parameters are required. :Example: .. code-block:: python required_if=[ ['state', 'present', ('path',), True], ['someint', 99, ('bool_param', 'string_param')], ] :arg parameters: Dictionary of parameters :returns: Empty list or raises :class:`TypeError` if the check fails. The results attribute of the exception contains a list of dictionaries. Each dictionary is the result of evaluating each item in requirements. Each return dictionary contains the following keys: :key missing: List of parameters that are required but missing :key requires: 'any' or 'all' :key parameter: Parameter name that has the requirement :key value: Original value of the parameter :key requirements: Original required parameters :Example: .. code-block:: python [ { 'parameter': 'someint', 'value': 99 'requirements': ('bool_param', 'string_param'), 'missing': ['string_param'], 'requires': 'all', } ] :kwarg options_context: List of strings of parent key names if ``requirements`` are in a sub spec. """ results = [] if requirements is None: return results for req in requirements: missing = {} missing['missing'] = [] max_missing_count = 0 is_one_of = False if len(req) == 4: key, val, requirements, is_one_of = req else: key, val, requirements = req # is_one_of is True at least one requirement should be # present, else all requirements should be present. if is_one_of: max_missing_count = len(requirements) missing['requires'] = 'any' else: missing['requires'] = 'all' if key in parameters and parameters[key] == val: for check in requirements: count = count_terms(check, parameters) if count == 0: missing['missing'].append(check) if len(missing['missing']) and len(missing['missing']) >= max_missing_count: missing['parameter'] = key missing['value'] = val missing['requirements'] = requirements results.append(missing) if results: for missing in results: msg = "%s is %s but %s of the following are missing: %s" % ( missing['parameter'], missing['value'], missing['requires'], ', '.join(missing['missing'])) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) raise TypeError(to_native(msg)) return results def check_missing_parameters(parameters, required_parameters=None): """This is for checking for required params when we can not check via argspec because we need more information than is simply given in the argspec. Raises :class:`TypeError` if any required parameters are missing :arg parameters: Dictionary of parameters :arg required_parameters: List of parameters to look for in the given parameters. :returns: Empty list or raises :class:`TypeError` if the check fails. 
""" missing_params = [] if required_parameters is None: return missing_params for param in required_parameters: if not parameters.get(param): missing_params.append(param) if missing_params: msg = "missing required arguments: %s" % ', '.join(missing_params) raise TypeError(to_native(msg)) return missing_params # FIXME: The param and prefix parameters here are coming from AnsibleModule._check_type_string() # which is using those for the warning messaged based on string conversion warning settings. # Not sure how to deal with that here since we don't have config state to query. def check_type_str(value, allow_conversion=True, param=None, prefix=''): """Verify that the value is a string or convert to a string. Since unexpected changes can sometimes happen when converting to a string, ``allow_conversion`` controls whether or not the value will be converted or a TypeError will be raised if the value is not a string and would be converted :arg value: Value to validate or convert to a string :arg allow_conversion: Whether to convert the string and return it or raise a TypeError :returns: Original value if it is a string, the value converted to a string if allow_conversion=True, or raises a TypeError if allow_conversion=False. """ if isinstance(value, string_types): return value if allow_conversion and value is not None: return to_native(value, errors='surrogate_or_strict') msg = "'{0!r}' is not a string and conversion is not allowed".format(value) raise TypeError(to_native(msg)) def check_type_list(value): """Verify that the value is a list or convert to a list A comma separated string will be split into a list. Raises a :class:`TypeError` if unable to convert to a list. :arg value: Value to validate or convert to a list :returns: Original value if it is already a list, single item list if a float, int, or string without commas, or a multi-item list if a comma-delimited string. """ if isinstance(value, list): return value if isinstance(value, string_types): return value.split(",") elif isinstance(value, int) or isinstance(value, float): return [str(value)] raise TypeError('%s cannot be converted to a list' % type(value)) def check_type_dict(value): """Verify that value is a dict or convert it to a dict and return it. Raises :class:`TypeError` if unable to convert to a dict :arg value: Dict or string to convert to a dict. Accepts ``k1=v2, k2=v2`` or ``k1=v2 k2=v2``. 
    :returns: value converted to a dictionary
    """
    if isinstance(value, dict):
        return value

    if isinstance(value, string_types):
        if value.startswith("{"):
            try:
                return json.loads(value)
            except Exception:
                try:
                    result = literal_eval(value)
                except Exception:
                    pass
                else:
                    if isinstance(result, dict):
                        return result

                raise TypeError('unable to evaluate string as dictionary')
        elif '=' in value:
            fields = []
            field_buffer = []
            in_quote = False
            in_escape = False
            for c in value.strip():
                if in_escape:
                    field_buffer.append(c)
                    in_escape = False
                elif c == '\\':
                    in_escape = True
                elif not in_quote and c in ('\'', '"'):
                    in_quote = c
                elif in_quote and in_quote == c:
                    in_quote = False
                elif not in_quote and c in (',', ' '):
                    field = ''.join(field_buffer)
                    if field:
                        fields.append(field)
                    field_buffer = []
                else:
                    field_buffer.append(c)

            field = ''.join(field_buffer)
            if field:
                fields.append(field)
            try:
                return dict(x.split("=", 1) for x in fields)
            except ValueError:
                # no "=" to split on: "k1=v1, k2"
                raise TypeError('unable to evaluate string in the "key=value" format as dictionary')
        else:
            raise TypeError("dictionary requested, could not parse JSON or key=value")

    raise TypeError('%s cannot be converted to a dict' % type(value))


def check_type_bool(value):
    """Verify that the value is a bool or convert it to a bool and return it.

    Raises :class:`TypeError` if unable to convert to a bool

    :arg value: String, int, or float to convert to bool. Valid booleans include:
        '1', 'on', 1, '0', 0, 'n', 'f', 'false', 'true', 'y', 't', 'yes', 'no', 'off'

    :returns: Boolean True or False
    """
    if isinstance(value, bool):
        return value

    if isinstance(value, string_types) or isinstance(value, (int, float)):
        return boolean(value)

    raise TypeError('%s cannot be converted to a bool' % type(value))


def check_type_int(value):
    """Verify that the value is an integer and return it or convert the value
    to an integer and return it

    Raises :class:`TypeError` if unable to convert to an int

    :arg value: String or int to convert or verify

    :return: int of given value
    """
    if not isinstance(value, int):
        try:
            if (decimal_value := decimal.Decimal(value)) != (int_value := int(decimal_value)):
                raise ValueError("Significant decimal part found")
            else:
                value = int_value
        except (decimal.DecimalException, TypeError, ValueError) as e:
            raise TypeError(f'"{value!r}" cannot be converted to an int') from e

    return value


def check_type_float(value):
    """Verify that value is a float or convert it to a float and return it

    Raises :class:`TypeError` if unable to convert to a float

    :arg value: float, int, str, or bytes to verify or convert and return.

    :returns: float of given value.
    """
    if not isinstance(value, float):
        try:
            value = float(value)
        except (TypeError, ValueError) as e:
            raise TypeError(f'{type(value)} cannot be converted to a float') from e

    return value


def check_type_path(value):
    """Verify the provided value is a string or convert it to a string,
    then return the expanded path
    """
    value = check_type_str(value)
    return os.path.expanduser(os.path.expandvars(value))


def check_type_raw(value):
    """Returns the raw value"""
    return value


def check_type_bytes(value):
    """Convert a human-readable string value to bytes

    Raises :class:`TypeError` if unable to convert the value
    """
    try:
        return human_to_bytes(value)
    except ValueError:
        raise TypeError('%s cannot be converted to a Byte value' % type(value))


def check_type_bits(value):
    """Convert a human-readable string bits value to bits in integer.

    Example: ``check_type_bits('1Mb')`` returns integer 1048576.
Raises :class:`TypeError` if unable to convert the value. """ try: return human_to_bytes(value, isbits=True) except ValueError: raise TypeError('%s cannot be converted to a Bit value' % type(value)) def check_type_jsonarg(value): """Return a jsonified string. Sometimes the controller turns a json string into a dict/list so transform it back into json here Raises :class:`TypeError` if unable to convert the value """ if isinstance(value, (text_type, binary_type)): return value.strip() elif isinstance(value, (list, tuple, dict)): return jsonify(value) raise TypeError('%s cannot be converted to a json string' % type(value))
19,630
Python
.py
449
35.135857
107
0.637438
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
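A few of the checkers above in action. The conditional-requirement case mirrors the required_if docstring example; the expected message in the comment follows directly from the code shown above.

from ansible.module_utils.common.validation import (
    check_required_if,
    check_type_dict,
    check_type_int,
)

params = {'someint': 99, 'bool_param': True}
try:
    check_required_if([['someint', 99, ('bool_param', 'string_param')]], params)
except TypeError as e:
    print(e)  # someint is 99 but all of the following are missing: string_param

print(check_type_dict('k1=v1, k2=v2'))  # {'k1': 'v1', 'k2': 'v2'}
print(check_type_int('42'))             # 42; '42.5' would raise TypeError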
14,228
sentinel.py
ansible_ansible/lib/ansible/module_utils/common/sentinel.py
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations


class Sentinel:
    """
    Object which can be used to mark an entry as being special

    A sentinel value demarcates a value or marks an entry as having a special meaning.
    In C, the Null byte is used as a sentinel for the end of a string. In Python, None
    is often used as a Sentinel in optional parameters to mean that the parameter was
    not set by the user.

    You should use None as a Sentinel value in any Python code where None is not a valid entry.
    If None is a valid entry, though, then you need to create a different value, which is the
    purpose of this class.

    Example of using Sentinel as a default parameter value::

        def confirm_big_red_button(tristate=Sentinel):
            if tristate is Sentinel:
                print('You must explicitly press the big red button to blow up the base')
            elif tristate is True:
                print('Countdown to destruction activated')
            elif tristate is False:
                print('Countdown stopped')
            elif tristate is None:
                print('Waiting for more input')

    Example of using Sentinel to tell whether a dict which has a default value has been changed::

        values = {'one': Sentinel, 'two': Sentinel}
        defaults = {'one': 1, 'two': 2}

        # [.. Other code which does things including setting a new value for 'one' ..]
        values['one'] = None
        # [..]

        print('You made changes to:')
        for key, value in values.items():
            if value is Sentinel:
                continue
            print('%s: %s' % (key, value))
    """
    def __new__(cls):
        """
        Return the cls itself. This makes both equality and identity True for comparing the class
        to an instance of the class, preventing common usage errors.

        Preferred usage::

            a = Sentinel
            if a is Sentinel:
                print('Sentinel value')

        However, these are True as well, eliminating common usage errors::

            if Sentinel is Sentinel():
                print('Sentinel value')

            if Sentinel == Sentinel():
                print('Sentinel value')
        """
        return cls
2,372
Python
.py
49
38.571429
98
0.627927
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
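A runnable variant of the docstring's idea: Sentinel distinguishes "argument omitted" from an explicit None, which None itself cannot do when None is a legal stored value.

from ansible.module_utils.common.sentinel import Sentinel

def lookup(table, key, default=Sentinel):
    if key in table:
        return table[key]
    if default is Sentinel:  # caller gave no default at all
        raise KeyError(key)
    return default

table = {'a': None}
print(lookup(table, 'a'))        # None is a real stored value
print(lookup(table, 'x', None))  # None as an explicit default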
14,229
yaml.py
ansible_ansible/lib/ansible/module_utils/common/yaml.py
# (c) 2020 Matt Martz <matt@sivel.net> # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) """ This file provides ease of use shortcuts for loading and dumping YAML, preferring the YAML compiled C extensions to reduce duplicated code. """ from __future__ import annotations from functools import partial as _partial HAS_LIBYAML = False try: import yaml as _yaml except ImportError: HAS_YAML = False else: HAS_YAML = True if HAS_YAML: try: from yaml import CSafeLoader as SafeLoader from yaml import CSafeDumper as SafeDumper from yaml.cyaml import CParser as Parser # type: ignore[attr-defined] # pylint: disable=unused-import HAS_LIBYAML = True except (ImportError, AttributeError): from yaml import SafeLoader # type: ignore[assignment] from yaml import SafeDumper # type: ignore[assignment] from yaml.parser import Parser # type: ignore[assignment] # pylint: disable=unused-import yaml_load = _partial(_yaml.load, Loader=SafeLoader) yaml_load_all = _partial(_yaml.load_all, Loader=SafeLoader) yaml_dump = _partial(_yaml.dump, Dumper=SafeDumper) yaml_dump_all = _partial(_yaml.dump_all, Dumper=SafeDumper) else: SafeLoader = object # type: ignore[assignment,misc] SafeDumper = object # type: ignore[assignment,misc] Parser = object # type: ignore[assignment,misc] yaml_load = None # type: ignore[assignment] yaml_load_all = None # type: ignore[assignment] yaml_dump = None # type: ignore[assignment] yaml_dump_all = None # type: ignore[assignment]
1,648
Python
.py
37
40.027027
111
0.723923
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
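A usage sketch for the shortcuts above. They are plain functools.partial objects, and they are None when PyYAML is absent, so callers should guard on HAS_YAML.

from ansible.module_utils.common.yaml import HAS_LIBYAML, HAS_YAML, yaml_dump, yaml_load

if HAS_YAML:
    data = yaml_load('a: 1\nb: [2, 3]\n')          # {'a': 1, 'b': [2, 3]}
    print('libyaml' if HAS_LIBYAML else 'pure-python parser')
    print(yaml_dump(data), end='')                 # safe dump back to YAML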
14,230
locale.py
ansible_ansible/lib/ansible/module_utils/common/locale.py
# Copyright (c), Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

from ansible.module_utils.common.text.converters import to_native


def get_best_parsable_locale(module, preferences=None, raise_on_locale=False):
    """
        Attempts to return the best possible locale for parsing output in English,
        which is useful when scraping the output of localized (i18n-aware) tools.
        When this raises an exception and the caller wants to continue, it should use the 'C' locale.

        :param module: an AnsibleModule instance
        :param preferences: A list of preferred locales, in order of preference
        :param raise_on_locale: boolean that determines whether we raise an exception on locale CLI issues
        :returns: The first matched preferred locale or 'C' which is the default
    """

    found = 'C'  # default posix, it's ascii but always there
    try:
        locale = module.get_bin_path("locale")
        if not locale:
            # not using required=true as that forces fail_json
            raise RuntimeWarning("Could not find 'locale' tool")

        available = []

        if preferences is None:
            # new POSIX standard or English, because those are the messages the core team expects
            # yes, the last 2 are the same but some systems are weird
            preferences = ['C.utf8', 'C.UTF-8', 'en_US.utf8', 'en_US.UTF-8', 'C', 'POSIX']

        rc, out, err = module.run_command([locale, '-a'])

        if rc == 0:
            if out:
                available = out.strip().splitlines()
            else:
                raise RuntimeWarning("No output from locale, rc=%s: %s" % (rc, to_native(err)))
        else:
            raise RuntimeWarning("Unable to get locale information, rc=%s: %s" % (rc, to_native(err)))

        if available:
            for pref in preferences:
                if pref in available:
                    found = pref
                    break

    except RuntimeWarning as e:
        if raise_on_locale:
            raise
        else:
            module.debug('Failed to get locale information: %s' % to_native(e))

    module.debug('Matched preferred locale to: %s' % found)

    return found
2,296
Python
.py
46
39.565217
106
0.625671
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
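A hedged sketch of the intended call pattern: pin a parsable locale before scraping CLI output. It assumes it runs inside a real module (AnsibleModule needs the AnsiballZ argument plumbing), and the df invocation is purely illustrative.

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale

module = AnsibleModule(argument_spec={})
best = get_best_parsable_locale(module)
# Export the chosen locale so the child process emits parsable English output.
rc, out, err = module.run_command(
    ['df', '-P'],
    environ_update={'LANG': best, 'LC_ALL': best, 'LC_MESSAGES': best},
)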
14,231
_utils.py
ansible_ansible/lib/ansible/module_utils/common/_utils.py
# Copyright (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

"""
Modules in _utils are waiting to find a better home. If you need to use them, be prepared for them
to move to a different location in the future.
"""

from __future__ import annotations


def get_all_subclasses(cls):
    """
    Recursively search and find all subclasses of a given class

    :arg cls: A python class
    :rtype: set
    :returns: The set of python classes which are the subclasses of `cls`.

    In python, you can use a class's :py:meth:`__subclasses__` method to determine what subclasses
    of a class exist. However, `__subclasses__` only goes one level deep. This function searches
    each child class's `__subclasses__` method to find all of the descendant classes. It then
    returns an iterable of the descendant classes.
    """
    # Retrieve direct subclasses
    subclasses = set(cls.__subclasses__())
    to_visit = list(subclasses)
    # Then visit all subclasses
    while to_visit:
        for sc in to_visit:
            # The current class is now visited, so remove it from list
            to_visit.remove(sc)
            # Queue any subclasses we have not seen yet and record them as known
            for ssc in sc.__subclasses__():
                if ssc not in subclasses:
                    to_visit.append(ssc)
                    subclasses.add(ssc)
    return subclasses
1,482
Python
.py
32
39.625
106
0.680055
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
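A quick demonstration of the difference the docstring describes: __subclasses__() stops at direct children, while get_all_subclasses() walks the whole inheritance tree.

from ansible.module_utils.common._utils import get_all_subclasses

class Base:
    pass

class Child(Base):
    pass

class GrandChild(Child):
    pass

print(Base.__subclasses__())     # [Child] only: one level deep
print(get_all_subclasses(Base))  # {Child, GrandChild}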
14,232
_collections_compat.py
ansible_ansible/lib/ansible/module_utils/common/_collections_compat.py
# Copyright (c), Sviatoslav Sydorenko <ssydoren@redhat.com> 2018 # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) """Collections ABC import shim. Use `ansible.module_utils.six.moves.collections_abc` instead, which has been available since ansible-core 2.11. This module exists only for backwards compatibility. """ from __future__ import annotations # Although this was originally intended for internal use only, it has wide adoption in collections. # This is due in part to sanity tests previously recommending its use over `collections` imports. from ansible.module_utils.six.moves.collections_abc import ( # pylint: disable=unused-import MappingView, ItemsView, KeysView, ValuesView, Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet, Container, Hashable, Sized, Callable, Iterable, Iterator, )
930
Python
.py
24
35.458333
111
0.770764
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,233
json.py
ansible_ansible/lib/ansible/module_utils/common/json.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

import json

import datetime

from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six.moves.collections_abc import Mapping
from ansible.module_utils.common.collections import is_sequence


def _is_unsafe(value):
    return getattr(value, '__UNSAFE__', False) and not getattr(value, '__ENCRYPTED__', False)


def _is_vault(value):
    return getattr(value, '__ENCRYPTED__', False)


def _preprocess_unsafe_encode(value):
    """Recursively preprocess a data structure converting instances of ``AnsibleUnsafe``
    into their JSON dict representations

    Used in ``AnsibleJSONEncoder.iterencode``
    """
    if _is_unsafe(value):
        value = {'__ansible_unsafe': to_text(value, errors='surrogate_or_strict', nonstring='strict')}
    elif is_sequence(value):
        value = [_preprocess_unsafe_encode(v) for v in value]
    elif isinstance(value, Mapping):
        value = dict((k, _preprocess_unsafe_encode(v)) for k, v in value.items())

    return value


def json_dump(structure):
    return json.dumps(structure, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)


class AnsibleJSONEncoder(json.JSONEncoder):
    """
    Simple encoder class to deal with JSON encoding of Ansible internal types
    """

    def __init__(self, preprocess_unsafe=False, vault_to_text=False, **kwargs):
        self._preprocess_unsafe = preprocess_unsafe
        self._vault_to_text = vault_to_text
        super(AnsibleJSONEncoder, self).__init__(**kwargs)

    # NOTE: ALWAYS inform AWS/Tower when new items get added as they consume them downstream via a callback
    def default(self, o):
        if getattr(o, '__ENCRYPTED__', False):
            # vault object
            if self._vault_to_text:
                value = to_text(o, errors='surrogate_or_strict')
            else:
                value = {'__ansible_vault': to_text(o._ciphertext, errors='surrogate_or_strict', nonstring='strict')}
        elif getattr(o, '__UNSAFE__', False):
            # unsafe object, this will never be triggered, see ``AnsibleJSONEncoder.iterencode``
            value = {'__ansible_unsafe': to_text(o, errors='surrogate_or_strict', nonstring='strict')}
        elif isinstance(o, Mapping):
            # hostvars and other objects
            value = dict(o)
        elif isinstance(o, (datetime.date, datetime.datetime)):
            # date object
            value = o.isoformat()
        else:
            # use default encoder
            value = super(AnsibleJSONEncoder, self).default(o)
        return value

    def iterencode(self, o, **kwargs):
        """Custom iterencode, primarily designed to handle encoding ``AnsibleUnsafe``
        as the ``AnsibleUnsafe`` subclasses inherit from string types and
        ``json.JSONEncoder`` does not support custom encoders for string types
        """
        if self._preprocess_unsafe:
            o = _preprocess_unsafe_encode(o)

        return super(AnsibleJSONEncoder, self).iterencode(o, **kwargs)
3,188
Python
.py
64
42.375
117
0.673969
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
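A minimal sketch of the encoder in use: AnsibleJSONEncoder serializes dates (via isoformat()) and Mapping objects that the stdlib encoder rejects, and json_dump() is the pre-configured convenience wrapper with sorted keys and indent=4.

import datetime
import json

from ansible.module_utils.common.json import AnsibleJSONEncoder, json_dump

doc = {'when': datetime.date(2024, 9, 5), 'hosts': ('a', 'b')}
print(json_dump(doc))                           # pretty-printed, date as '2024-09-05'
print(json.dumps(doc, cls=AnsibleJSONEncoder))  # same encoder, compact output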
14,234
respawn.py
ansible_ansible/lib/ansible/module_utils/common/respawn.py
# Copyright: (c) 2021, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations import os import subprocess import sys import typing as t from ansible.module_utils.common.text.converters import to_bytes def has_respawned(): return hasattr(sys.modules['__main__'], '_respawned') def respawn_module(interpreter_path) -> t.NoReturn: """ Respawn the currently-running Ansible Python module under the specified Python interpreter. Ansible modules that require libraries that are typically available only under well-known interpreters (eg, ``apt``, ``dnf``) can use bespoke logic to determine the libraries they need are not available, then call `respawn_module` to re-execute the current module under a different interpreter and exit the current process when the new subprocess has completed. The respawned process inherits only stdout/stderr from the current process. Only a single respawn is allowed. ``respawn_module`` will fail on nested respawns. Modules are encouraged to call `has_respawned()` to defensively guide behavior before calling ``respawn_module``, and to ensure that the target interpreter exists, as ``respawn_module`` will not fail gracefully. :arg interpreter_path: path to a Python interpreter to respawn the current module """ if has_respawned(): raise Exception('module has already been respawned') # FUTURE: we need a safe way to log that a respawn has occurred for forensic/debug purposes payload = _create_payload() stdin_read, stdin_write = os.pipe() os.write(stdin_write, to_bytes(payload)) os.close(stdin_write) rc = subprocess.call([interpreter_path, '--'], stdin=stdin_read) sys.exit(rc) # pylint: disable=ansible-bad-function def probe_interpreters_for_module(interpreter_paths, module_name): """ Probes a supplied list of Python interpreters, returning the first one capable of importing the named module. This is useful when attempting to locate a "system Python" where OS-packaged utility modules are located. :arg interpreter_paths: iterable of paths to Python interpreters. The paths will be probed in order, and the first path that exists and can successfully import the named module will be returned (or ``None`` if probing fails for all supplied paths). :arg module_name: fully-qualified Python module name to probe for (eg, ``selinux``) """ for interpreter_path in interpreter_paths: if not os.path.exists(interpreter_path): continue try: rc = subprocess.call([interpreter_path, '-c', 'import {0}'.format(module_name)]) if rc == 0: return interpreter_path except Exception: continue return None def _create_payload(): from ansible.module_utils import basic smuggled_args = getattr(basic, '_ANSIBLE_ARGS') if not smuggled_args: raise Exception('unable to access ansible.module_utils.basic._ANSIBLE_ARGS (not launched by AnsiballZ?)') module_fqn = sys.modules['__main__']._module_fqn modlib_path = sys.modules['__main__']._modlib_path respawn_code_template = """ import runpy import sys module_fqn = {module_fqn!r} modlib_path = {modlib_path!r} smuggled_args = {smuggled_args!r} if __name__ == '__main__': sys.path.insert(0, modlib_path) from ansible.module_utils import basic basic._ANSIBLE_ARGS = smuggled_args runpy.run_module(module_fqn, init_globals=dict(_respawned=True), run_name='__main__', alter_sys=True) """ respawn_code = respawn_code_template.format(module_fqn=module_fqn, modlib_path=modlib_path, smuggled_args=smuggled_args.strip()) return respawn_code
3,799
Python
.py
73
46.821918
132
0.725014
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
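A minimal usage sketch for the record above (not part of the source file): a module that needs an OS-packaged library probes a couple of likely system interpreters and respawns at most once. The probed paths and the `selinux` dependency are illustrative assumptions, not anything prescribed by respawn.py.

# Hypothetical module body illustrating the respawn pattern; the probed
# interpreter paths and the `selinux` dependency are assumptions.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.respawn import (
    has_respawned,
    probe_interpreters_for_module,
    respawn_module,
)

try:
    import selinux  # noqa: F401
    HAS_SELINUX = True
except ImportError:
    HAS_SELINUX = False


def main():
    module = AnsibleModule(argument_spec={})
    if not HAS_SELINUX:
        if not has_respawned():
            # find a system interpreter that can import the OS-packaged library
            interp = probe_interpreters_for_module(
                ['/usr/bin/python3', '/usr/libexec/platform-python'], 'selinux')
            if interp:
                respawn_module(interp)  # never returns; the respawned copy re-runs main()
        module.fail_json(msg='selinux bindings are not importable under any probed interpreter')
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()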
14,235
converters.py
ansible_ansible/lib/ansible/module_utils/common/text/converters.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import annotations

import codecs
import datetime
import json

from ansible.module_utils.six.moves.collections_abc import Set
from ansible.module_utils.six import (
    PY3,
    binary_type,
    iteritems,
    text_type,
)

try:
    codecs.lookup_error('surrogateescape')
    HAS_SURROGATEESCAPE = True
except LookupError:
    HAS_SURROGATEESCAPE = False


_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_replace', 'surrogate_or_strict', 'surrogate_then_replace'))


def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a byte string

    :arg obj: An object to make sure is a byte string.  In most cases this
        will be either a text string or a byte string.  However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a text string to
        a byte string.  Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the text string is not
        encodable using the specified encoding.  Any valid `codecs error
        handler <https://docs.python.org/3/library/codecs.html#codec-base-classes>`_
        may be specified.  There are three additional error strategies
        specifically aimed at helping people to port code.  The first two are:

            :surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
                handler, otherwise it will use ``strict``
            :surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
                handler, otherwise it will use ``replace``.

        Because ``surrogateescape`` was added in Python3 this usually means that
        Python3 will use ``surrogateescape`` and Python2 will use the fallback
        error handler.  Note that the code checks for ``surrogateescape`` when the
        module is imported.  If you have a backport of ``surrogateescape`` for
        Python2, be sure to register the error handler prior to importing this
        module.

        The last error handler is:

            :surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
                handler.  If encoding with ``surrogateescape`` would traceback,
                surrogates are first replaced with the replacement character and
                then the string is encoded using ``replace`` (which replaces the
                rest of the nonencodable bytes).  If ``surrogateescape`` is not
                present it will simply use ``replace``.  (Added in Ansible 2.3)
                This strategy is designed to never traceback when it attempts
                to encode a string.

        The default until Ansible-2.2 was ``surrogate_or_replace``.
        From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.

    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``.  Default is 'simplerepr'.  Valid values are:

            :simplerepr: The default.  This takes the ``str`` of the object and
                then returns the bytes version of that string.
            :empty: Return an empty byte string
            :passthru: Return the object passed in
            :strict: Raise a :exc:`TypeError`

    :returns: Typically this returns a byte string.  If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring.  This will never return a text string.

    .. note:: If passed a byte string, this function does not check that the
        string is valid in the specified encoding.  If it's important that the
        byte string is in the specified encoding do::

            encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')

    .. versionchanged:: 2.3

        Added the ``surrogate_then_replace`` error handler and made it the default error handler.
    """
    if isinstance(obj, binary_type):
        return obj

    # We're given a text string
    # If it has surrogates, we know because it will decode
    original_errors = errors
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, 'surrogate_then_replace'):
                # We should only reach this if encoding was non-utf8,
                # original_errors was surrogate_then_replace, and errors was
                # surrogateescape

                # Slow but works
                return_string = obj.encode('utf-8', 'surrogateescape')
                return_string = return_string.decode('utf-8', 'replace')
                return return_string.encode(encoding, 'replace')
            raise

    # Note: We do these last even though we have to call to_bytes again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes('')
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        # python2.4 doesn't have b''
        return to_bytes('')
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)

    return to_bytes(value, encoding, errors)


def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string

    :arg obj: An object to make sure is a text string.  In most cases this
        will be either a text string or a byte string.  However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a byte string to
        a text string.  Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the byte string is not
        decodable using the specified encoding.  Any valid `codecs error
        handler <https://docs.python.org/3/library/codecs.html#codec-base-classes>`_
        may be specified.  We support three additional error strategies
        specifically aimed at helping people to port code:

            :surrogate_or_strict: Will use surrogateescape if it is a valid
                handler, otherwise it will use strict
            :surrogate_or_replace: Will use surrogateescape if it is a valid
                handler, otherwise it will use replace.
            :surrogate_then_replace: Does the same as surrogate_or_replace but
                was added for symmetry with the error handlers in
                :func:`ansible.module_utils.common.text.converters.to_bytes`
                (Added in Ansible 2.3)

        Because surrogateescape was added in Python3 this usually means that
        Python3 will use `surrogateescape` and Python2 will use the fallback
        error handler.  Note that the code checks for surrogateescape when the
        module is imported.  If you have a backport of `surrogateescape` for
        python2, be sure to register the error handler prior to importing this
        module.

        The default until Ansible-2.2 was `surrogate_or_replace`.
        In Ansible-2.3 this defaults to `surrogate_then_replace` for symmetry
        with :func:`ansible.module_utils.common.text.converters.to_bytes` .

    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``.  Default is 'simplerepr'.  Valid values are:

            :simplerepr: The default.  This takes the ``str`` of the object and
                then returns the text version of that string.
            :empty: Return an empty text string
            :passthru: Return the object passed in
            :strict: Raise a :exc:`TypeError`

    :returns: Typically this returns a text string.  If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring.  This will never return a byte string.
        From Ansible-2.3 onwards, the default is `surrogate_then_replace`.

    .. versionchanged:: 2.3

        Added the surrogate_then_replace error handler and made it the default error handler.
    """
    if isinstance(obj, text_type):
        return obj

    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)

    # Note: We do these last even though we have to call to_text again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return u''
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)

    return to_text(value, encoding, errors)


#: :py:func:`to_native`
#:      Transform a variable into the native str type for the python version
#:
#:      On Python2, this is an alias for
#:      :func:`~ansible.module_utils.to_bytes`.  On Python3 it is an alias for
#:      :func:`~ansible.module_utils.to_text`.  It makes it easier to
#:      transform a variable into the native str type for the python version
#:      the code is running on.  Use this when constructing the message to
#:      send to exceptions or when dealing with an API that needs to take
#:      a native string.  Example::
#:
#:          try:
#:              1//0
#:          except ZeroDivisionError as e:
#:              raise MyException('Encountered an error: %s' % to_native(e))
if PY3:
    to_native = to_text
else:
    to_native = to_bytes


def _json_encode_fallback(obj):
    if isinstance(obj, Set):
        return list(obj)
    elif isinstance(obj, datetime.datetime):
        return obj.isoformat()
    raise TypeError("Cannot json serialize %s" % to_native(obj))


def jsonify(data, **kwargs):
    # After 2.18, we should remove this loop, and hardcode to utf-8 in alignment with requiring utf-8 module responses
    for encoding in ("utf-8", "latin-1"):
        try:
            new_data = container_to_text(data, encoding=encoding)
        except UnicodeDecodeError:
            continue
        return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
    raise UnicodeError('Invalid unicode encoding encountered')


def container_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
    """Recursively convert dict keys and values to byte str

    Specialized for json return because this only handles lists, tuples,
    and dict container types (the containers that the json module returns)
    """
    if isinstance(d, text_type):
        return to_bytes(d, encoding=encoding, errors=errors)
    elif isinstance(d, dict):
        return dict(container_to_bytes(o, encoding, errors) for o in iteritems(d))
    elif isinstance(d, list):
        return [container_to_bytes(o, encoding, errors) for o in d]
    elif isinstance(d, tuple):
        return tuple(container_to_bytes(o, encoding, errors) for o in d)
    else:
        return d


def container_to_text(d, encoding='utf-8', errors='surrogate_or_strict'):
    """Recursively convert dict keys and values to text str

    Specialized for json return because this only handles lists, tuples,
    and dict container types (the containers that the json module returns)
    """
    if isinstance(d, binary_type):
        # Warning, can traceback
        return to_text(d, encoding=encoding, errors=errors)
    elif isinstance(d, dict):
        return dict(container_to_text(o, encoding, errors) for o in iteritems(d))
    elif isinstance(d, list):
        return [container_to_text(o, encoding, errors) for o in d]
    elif isinstance(d, tuple):
        return tuple(container_to_text(o, encoding, errors) for o in d)
    else:
        return d
13,118
Python
.py
262
41.70229
118
0.66099
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
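A short, self-contained sketch of the round-trip guarantees described in the docstrings above; the byte values are arbitrary examples chosen so each assertion can be checked by hand.

# Examples of the composed error handlers; values are arbitrary.
from ansible.module_utils.common.text.converters import to_bytes, to_text

raw = b'caf\xe9'  # latin-1 bytes, not valid UTF-8

# surrogateescape smuggles the undecodable byte through a text round-trip
text = to_text(raw, errors='surrogate_or_strict')
assert to_bytes(text, errors='surrogate_or_strict') == raw

# the default handler (surrogate_then_replace) never raises on encode;
# unencodable characters degrade to '?' under an ASCII target codec
assert to_bytes(u'caf\xe9', encoding='ascii') == b'caf?'

# nonstring strategies: 'simplerepr' (the default) stringifies, 'strict' raises
assert to_text(3.14) == u'3.14'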
14,236
formatters.py
ansible_ansible/lib/ansible/module_utils/common/text/formatters.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import annotations

import re

from ansible.module_utils.six import iteritems

SIZE_RANGES = {
    'Y': 1 << 80,
    'Z': 1 << 70,
    'E': 1 << 60,
    'P': 1 << 50,
    'T': 1 << 40,
    'G': 1 << 30,
    'M': 1 << 20,
    'K': 1 << 10,
    'B': 1,
}

VALID_UNITS = {
    'B': (('byte', 'B'), ('bit', 'b')),
    'K': (('kilobyte', 'KB'), ('kilobit', 'Kb')),
    'M': (('megabyte', 'MB'), ('megabit', 'Mb')),
    'G': (('gigabyte', 'GB'), ('gigabit', 'Gb')),
    'T': (('terabyte', 'TB'), ('terabit', 'Tb')),
    'P': (('petabyte', 'PB'), ('petabit', 'Pb')),
    'E': (('exabyte', 'EB'), ('exabit', 'Eb')),
    'Z': (('zetabyte', 'ZB'), ('zetabit', 'Zb')),
    'Y': (('yottabyte', 'YB'), ('yottabit', 'Yb')),
}


def lenient_lowercase(lst):
    """Lowercase elements of a list.

    If an element is not a string, pass it through untouched.
    """
    lowered = []
    for value in lst:
        try:
            lowered.append(value.lower())
        except AttributeError:
            lowered.append(value)
    return lowered


def human_to_bytes(number, default_unit=None, isbits=False):
    """Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument.

    example: human_to_bytes('10M') <=> human_to_bytes(10, 'M').

    When isbits is False (default), converts bytes from a human-readable format to integer.
        example: human_to_bytes('1MB') returns 1048576 (int).
        The function expects 'B' (uppercase) as a byte identifier, passed as part of the
        'number' param string or via 'default_unit', e.g. 'MB'/'KB'/etc.
        (except when the identifier is a single 'b', it is perceived as a byte identifier too).
        If 'Mb'/'Kb'/... is passed, a ValueError will be raised.

    When isbits is True, converts bits from a human-readable format to integer.
        example: human_to_bytes('1Mb', isbits=True) returns 1048576 (int) -
        a string bit representation was passed and is returned as a number of bits.
        The function expects 'b' (lowercase) as a bit identifier, e.g. 'Mb'/'Kb'/etc.
        If 'MB'/'KB'/... is passed, a ValueError will be raised.
    """
    m = re.search(r'^([0-9]*\.?[0-9]+)(?:\s*([A-Za-z]+))?\s*$', str(number))
    if m is None:
        raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
    try:
        num = float(m.group(1))
    except Exception:
        raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))

    unit = m.group(2)
    if unit is None:
        unit = default_unit

    if unit is None:
        # No unit given, returning raw number
        return int(round(num))
    range_key = unit[0].upper()
    try:
        limit = SIZE_RANGES[range_key]
    except Exception:
        raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))

    # default value
    unit_class = 'B'
    unit_class_name = 'byte'
    # handling bits case
    if isbits:
        unit_class = 'b'
        unit_class_name = 'bit'
    # check unit value if more than one character (KB, MB)
    if len(unit) > 1:
        expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
        if range_key == 'B':
            expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
        unit_group = VALID_UNITS.get(range_key, None)
        if unit_group is None:
            raise ValueError(f"human_to_bytes() can't interpret a valid unit for {range_key}")
        isbits_flag = 1 if isbits else 0
        if unit.lower() == unit_group[isbits_flag][0]:
            pass
        elif unit != unit_group[isbits_flag][1]:
            raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))

    return int(round(num * limit))


def bytes_to_human(size, isbits=False, unit=None):
    base = 'Bytes'
    if isbits:
        base = 'bits'
    suffix = ''

    for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
        if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
            break

    if limit != 1:
        suffix += base[0]
    else:
        suffix = base

    return '%.2f %s' % (size / limit, suffix)
4,502
Python
.py
106
36.009434
155
0.592957
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
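A few worked conversions implied by the tables above (all prefixes are binary, so 'K' is 2**10 and 'M' is 2**20); each value follows directly from num * SIZE_RANGES[key].

# Worked examples; all prefixes are binary (K = 2**10, M = 2**20, ...).
from ansible.module_utils.common.text.formatters import bytes_to_human, human_to_bytes

assert human_to_bytes('2K') == 2048
assert human_to_bytes('1MB') == 1048576                # uppercase 'B' = bytes
assert human_to_bytes(10, default_unit='M') == 10485760
assert human_to_bytes('1Mb', isbits=True) == 1048576   # lowercase 'b' = bits
assert bytes_to_human(1048576) == '1.00 MB'
assert bytes_to_human(1048576, isbits=True) == '1.00 Mb'

# mixing modes is rejected: a bit suffix in byte mode raises ValueError
try:
    human_to_bytes('1Mb')
except ValueError:
    pass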
14,237
_distro.py
ansible_ansible/lib/ansible/module_utils/distro/_distro.py
# Copyright 2015-2021 Nir Cohen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # A local copy of the license can be found in licenses/Apache-License.txt # # Modifications to this code have been made by Ansible Project """ The ``distro`` package (``distro`` stands for Linux Distribution) provides information about the Linux distribution it runs on, such as a reliable machine-readable distro ID, or version information. It is the recommended replacement for Python's original :py:func:`platform.linux_distribution` function, but it provides much more functionality. An alternative implementation became necessary because Python 3.5 deprecated this function, and Python 3.8 removed it altogether. Its predecessor function :py:func:`platform.dist` was already deprecated since Python 2.6 and removed in Python 3.8. Still, there are many cases in which access to OS distribution information is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for more information. """ from __future__ import annotations import argparse import json import logging import os import re import shlex import subprocess import sys import warnings from typing import ( Any, Callable, Dict, Iterable, Optional, Sequence, TextIO, Tuple, Type, ) try: from typing import TypedDict except ImportError: # Python 3.7 TypedDict = dict __version__ = "1.9.0" class VersionDict(TypedDict): major: str minor: str build_number: str class InfoDict(TypedDict): id: str version: str version_parts: VersionDict like: str codename: str _UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") _UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") _OS_RELEASE_BASENAME = "os-release" #: Translation table for normalizing the "ID" attribute defined in os-release #: files, for use by the :func:`distro.id` method. #: #: * Key: Value as defined in the os-release file, translated to lower case, #: with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_OS_ID = { "ol": "oracle", # Oracle Linux "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap } #: Translation table for normalizing the "Distributor ID" attribute returned by #: the lsb_release command, for use by the :func:`distro.id` method. #: #: * Key: Value as returned by the lsb_release command, translated to lower #: case, with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_LSB_ID = { "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4 "enterpriseenterpriseserver": "oracle", # Oracle Linux 5 "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode } #: Translation table for normalizing the distro ID derived from the file name #: of distro release files, for use by the :func:`distro.id` method. #: #: * Key: Value as derived from the file name of a distro release file, #: translated to lower case, with blanks translated to underscores. #: #: * Value: Normalized value. 
NORMALIZED_DISTRO_ID = { "redhat": "rhel", # RHEL 6.x, 7.x } # Pattern for content of distro release file (reversed) _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" ) # Pattern for base file name of distro release file _DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") # Base file names to be looked up for if _UNIXCONFDIR is not readable. _DISTRO_RELEASE_BASENAMES = [ "SuSE-release", "altlinux-release", "arch-release", "base-release", "centos-release", "fedora-release", "gentoo-release", "mageia-release", "mandrake-release", "mandriva-release", "mandrivalinux-release", "manjaro-release", "oracle-release", "redhat-release", "rocky-release", "sl-release", "slackware-version", ] # Base file names to be ignored when searching for distro release file _DISTRO_RELEASE_IGNORE_BASENAMES = ( "debian_version", "lsb-release", "oem-release", _OS_RELEASE_BASENAME, "system-release", "plesk-release", "iredmail-release", "board-release", "ec2_version", ) def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: """ .. deprecated:: 1.6.0 :func:`distro.linux_distribution()` is deprecated. It should only be used as a compatibility shim with Python's :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`, :func:`distro.version` and :func:`distro.name` instead. Return information about the current OS distribution as a tuple ``(id_name, version, codename)`` with items as follows: * ``id_name``: If *full_distribution_name* is false, the result of :func:`distro.id`. Otherwise, the result of :func:`distro.name`. * ``version``: The result of :func:`distro.version`. * ``codename``: The extra item (usually in parentheses) after the os-release version number, or the result of :func:`distro.codename`. The interface of this function is compatible with the original :py:func:`platform.linux_distribution` function, supporting a subset of its parameters. The data it returns may not exactly be the same, because it uses more data sources than the original function, and that may lead to different data if the OS distribution is not consistent across multiple data sources it provides (there are indeed such distributions ...). Another reason for differences is the fact that the :func:`distro.id` method normalizes the distro ID string to a reliable machine-readable value for a number of popular OS distributions. """ warnings.warn( "distro.linux_distribution() is deprecated. It should only be used as a " "compatibility shim with Python's platform.linux_distribution(). Please use " "distro.id(), distro.version() and distro.name() instead.", DeprecationWarning, stacklevel=2, ) return _distro.linux_distribution(full_distribution_name) def id() -> str: """ Return the distro ID of the current distribution, as a machine-readable string. For a number of OS distributions, the returned distro ID value is *reliable*, in the sense that it is documented and that it does not change across releases of the distribution. 
This package maintains the following reliable distro ID values: ============== ========================================= Distro ID Distribution ============== ========================================= "ubuntu" Ubuntu "debian" Debian "rhel" RedHat Enterprise Linux "centos" CentOS "fedora" Fedora "sles" SUSE Linux Enterprise Server "opensuse" openSUSE "amzn" Amazon Linux "arch" Arch Linux "buildroot" Buildroot "cloudlinux" CloudLinux OS "exherbo" Exherbo Linux "gentoo" GenToo Linux "ibm_powerkvm" IBM PowerKVM "kvmibm" KVM for IBM z Systems "linuxmint" Linux Mint "mageia" Mageia "mandriva" Mandriva Linux "parallels" Parallels "pidora" Pidora "raspbian" Raspbian "oracle" Oracle Linux (and Oracle Enterprise Linux) "scientific" Scientific Linux "slackware" Slackware "xenserver" XenServer "openbsd" OpenBSD "netbsd" NetBSD "freebsd" FreeBSD "midnightbsd" MidnightBSD "rocky" Rocky Linux "aix" AIX "guix" Guix System "altlinux" ALT Linux ============== ========================================= If you have a need to get distros for reliable IDs added into this set, or if you find that the :func:`distro.id` function returns a different distro ID for one of the listed distros, please create an issue in the `distro issue tracker`_. **Lookup hierarchy and transformations:** First, the ID is obtained from the following sources, in the specified order. The first available and non-empty value is used: * the value of the "ID" attribute of the os-release file, * the value of the "Distributor ID" attribute returned by the lsb_release command, * the first part of the file name of the distro release file, The so determined ID value then passes the following transformations, before it is returned by this method: * it is translated to lower case, * blanks (which should not be there anyway) are translated to underscores, * a normalization of the ID is performed, based upon `normalization tables`_. The purpose of this normalization is to ensure that the ID is as reliable as possible, even across incompatible changes in the OS distributions. A common reason for an incompatible change is the addition of an os-release file, or the addition of the lsb_release command, with ID values that differ from what was previously determined from the distro release file name. """ return _distro.id() def name(pretty: bool = False) -> str: """ Return the name of the current OS distribution, as a human-readable string. If *pretty* is false, the name is returned without version or codename. (e.g. "CentOS Linux") If *pretty* is true, the version and codename are appended. (e.g. "CentOS Linux 7.1.1503 (Core)") **Lookup hierarchy:** The name is obtained from the following sources, in the specified order. The first available and non-empty value is used: * If *pretty* is false: - the value of the "NAME" attribute of the os-release file, - the value of the "Distributor ID" attribute returned by the lsb_release command, - the value of the "<name>" field of the distro release file. * If *pretty* is true: - the value of the "PRETTY_NAME" attribute of the os-release file, - the value of the "Description" attribute returned by the lsb_release command, - the value of the "<name>" field of the distro release file, appended with the value of the pretty version ("<version_id>" and "<codename>" fields) of the distro release file, if available. """ return _distro.name(pretty) def version(pretty: bool = False, best: bool = False) -> str: """ Return the version of the current OS distribution, as a human-readable string. 
If *pretty* is false, the version is returned without codename (e.g. "7.0"). If *pretty* is true, the codename in parenthesis is appended, if the codename is non-empty (e.g. "7.0 (Maipo)"). Some distributions provide version numbers with different precisions in the different sources of distribution information. Examining the different sources in a fixed priority order does not always yield the most precise version (e.g. for Debian 8.2, or CentOS 7.1). Some other distributions may not provide this kind of information. In these cases, an empty string would be returned. This behavior can be observed with rolling releases distributions (e.g. Arch Linux). The *best* parameter can be used to control the approach for the returned version: If *best* is false, the first non-empty version number in priority order of the examined sources is returned. If *best* is true, the most precise version number out of all examined sources is returned. **Lookup hierarchy:** In all cases, the version number is obtained from the following sources. If *best* is false, this order represents the priority order: * the value of the "VERSION_ID" attribute of the os-release file, * the value of the "Release" attribute returned by the lsb_release command, * the version number parsed from the "<version_id>" field of the first line of the distro release file, * the version number parsed from the "PRETTY_NAME" attribute of the os-release file, if it follows the format of the distro release files. * the version number parsed from the "Description" attribute returned by the lsb_release command, if it follows the format of the distro release files. """ return _distro.version(pretty, best) def version_parts(best: bool = False) -> Tuple[str, str, str]: """ Return the version of the current OS distribution as a tuple ``(major, minor, build_number)`` with items as follows: * ``major``: The result of :func:`distro.major_version`. * ``minor``: The result of :func:`distro.minor_version`. * ``build_number``: The result of :func:`distro.build_number`. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.version_parts(best) def major_version(best: bool = False) -> str: """ Return the major version of the current OS distribution, as a string, if provided. Otherwise, the empty string is returned. The major version is the first part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.major_version(best) def minor_version(best: bool = False) -> str: """ Return the minor version of the current OS distribution, as a string, if provided. Otherwise, the empty string is returned. The minor version is the second part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.minor_version(best) def build_number(best: bool = False) -> str: """ Return the build number of the current OS distribution, as a string, if provided. Otherwise, the empty string is returned. The build number is the third part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.build_number(best) def like() -> str: """ Return a space-separated list of distro IDs of distributions that are closely related to the current OS distribution in regards to packaging and programming interfaces, for example distributions the current distribution is a derivative from. 
**Lookup hierarchy:** This information item is only provided by the os-release file. For details, see the description of the "ID_LIKE" attribute in the `os-release man page <http://www.freedesktop.org/software/systemd/man/os-release.html>`_. """ return _distro.like() def codename() -> str: """ Return the codename for the release of the current OS distribution, as a string. If the distribution does not have a codename, an empty string is returned. Note that the returned codename is not always really a codename. For example, openSUSE returns "x86_64". This function does not handle such cases in any special way and just returns the string it finds, if any. **Lookup hierarchy:** * the codename within the "VERSION" attribute of the os-release file, if provided, * the value of the "Codename" attribute returned by the lsb_release command, * the value of the "<codename>" field of the distro release file. """ return _distro.codename() def info(pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information items about the current OS distribution in a dictionary, as shown in the following example: .. sourcecode:: python { 'id': 'rhel', 'version': '7.0', 'version_parts': { 'major': '7', 'minor': '0', 'build_number': '' }, 'like': 'fedora', 'codename': 'Maipo' } The dictionary structure and keys are always the same, regardless of which information items are available in the underlying data sources. The values for the various keys are as follows: * ``id``: The result of :func:`distro.id`. * ``version``: The result of :func:`distro.version`. * ``version_parts -> major``: The result of :func:`distro.major_version`. * ``version_parts -> minor``: The result of :func:`distro.minor_version`. * ``version_parts -> build_number``: The result of :func:`distro.build_number`. * ``like``: The result of :func:`distro.like`. * ``codename``: The result of :func:`distro.codename`. For a description of the *pretty* and *best* parameters, see the :func:`distro.version` method. """ return _distro.info(pretty, best) def os_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the current OS distribution. See `os-release file`_ for details about these information items. """ return _distro.os_release_info() def lsb_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the current OS distribution. See `lsb_release command output`_ for details about these information items. """ return _distro.lsb_release_info() def distro_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. See `distro release file`_ for details about these information items. """ return _distro.distro_release_info() def uname_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. """ return _distro.uname_info() def os_release_attr(attribute: str) -> str: """ Return a single named information item from the os-release file data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. 
See `os-release file`_ for details about these information items. """ return _distro.os_release_attr(attribute) def lsb_release_attr(attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `lsb_release command output`_ for details about these information items. """ return _distro.lsb_release_attr(attribute) def distro_release_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `distro release file`_ for details about these information items. """ return _distro.distro_release_attr(attribute) def uname_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. """ return _distro.uname_attr(attribute) try: from functools import cached_property except ImportError: # Python < 3.8 class cached_property: # type: ignore """A version of @property which caches the value. On access, it calls the underlying function and sets the value in `__dict__` so future accesses will not re-call the property. """ def __init__(self, f: Callable[[Any], Any]) -> None: self._fname = f.__name__ self._f = f def __get__(self, obj: Any, owner: Type[Any]) -> Any: assert obj is not None, f"call {self._fname} on an instance" ret = obj.__dict__[self._fname] = self._f(obj) return ret class LinuxDistribution: """ Provides information about a OS distribution. This package creates a private module-global instance of this class with default initialization arguments, that is used by the `consolidated accessor functions`_ and `single source accessor functions`_. By using default initialization arguments, that module-global instance returns data about the current OS distribution (i.e. the distro this package runs on). Normally, it is not necessary to create additional instances of this class. However, in situations where control is needed over the exact data sources that are used, instances of this class can be created with a specific distro release file, or a specific os-release file, or without invoking the lsb_release command. """ def __init__( self, include_lsb: Optional[bool] = None, os_release_file: str = "", distro_release_file: str = "", include_uname: Optional[bool] = None, root_dir: Optional[str] = None, include_oslevel: Optional[bool] = None, ) -> None: """ The initialization method of this class gathers information from the available data sources, and stores that in private instance attributes. Subsequent access to the information items uses these private instance attributes, so that the data sources are read only once. Parameters: * ``include_lsb`` (bool): Controls whether the `lsb_release command output`_ is included as a data source. If the lsb_release command is not available in the program execution path, the data source for the lsb_release command will be empty. 
* ``os_release_file`` (string): The path name of the `os-release file`_ that is to be used as a data source. An empty string (the default) will cause the default path name to be used (see `os-release file`_ for details). If the specified or defaulted os-release file does not exist, the data source for the os-release file will be empty. * ``distro_release_file`` (string): The path name of the `distro release file`_ that is to be used as a data source. An empty string (the default) will cause a default search algorithm to be used (see `distro release file`_ for details). If the specified distro release file does not exist, or if no default distro release file can be found, the data source for the distro release file will be empty. * ``include_uname`` (bool): Controls whether uname command output is included as a data source. If the uname command is not available in the program execution path the data source for the uname command will be empty. * ``root_dir`` (string): The absolute path to the root directory to use to find distro-related information files. Note that ``include_*`` parameters must not be enabled in combination with ``root_dir``. * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command output is included as a data source. If the oslevel command is not available in the program execution path the data source will be empty. Public instance attributes: * ``os_release_file`` (string): The path name of the `os-release file`_ that is actually used as a data source. The empty string if no distro release file is used as a data source. * ``distro_release_file`` (string): The path name of the `distro release file`_ that is actually used as a data source. The empty string if no distro release file is used as a data source. * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. This controls whether the lsb information will be loaded. * ``include_uname`` (bool): The result of the ``include_uname`` parameter. This controls whether the uname information will be loaded. * ``include_oslevel`` (bool): The result of the ``include_oslevel`` parameter. This controls whether (AIX) oslevel information will be loaded. * ``root_dir`` (string): The result of the ``root_dir`` parameter. The absolute path to the root directory to use to find distro-related information files. Raises: * :py:exc:`ValueError`: Initialization parameters combination is not supported. * :py:exc:`OSError`: Some I/O issue with an os-release file or distro release file. * :py:exc:`UnicodeError`: A data source has unexpected characters or uses an unexpected encoding. """ self.root_dir = root_dir self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR self.usr_lib_dir = ( os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR ) if os_release_file: self.os_release_file = os_release_file else: etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) usr_lib_os_release_file = os.path.join( self.usr_lib_dir, _OS_RELEASE_BASENAME ) # NOTE: The idea is to respect order **and** have it set # at all times for API backwards compatibility. 
if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( usr_lib_os_release_file ): self.os_release_file = etc_dir_os_release_file else: self.os_release_file = usr_lib_os_release_file self.distro_release_file = distro_release_file or "" # updated later is_root_dir_defined = root_dir is not None if is_root_dir_defined and (include_lsb or include_uname or include_oslevel): raise ValueError( "Including subprocess data sources from specific root_dir is disallowed" " to prevent false information" ) self.include_lsb = ( include_lsb if include_lsb is not None else not is_root_dir_defined ) self.include_uname = ( include_uname if include_uname is not None else not is_root_dir_defined ) self.include_oslevel = ( include_oslevel if include_oslevel is not None else not is_root_dir_defined ) def __repr__(self) -> str: """Return repr of all info""" return ( "LinuxDistribution(" "os_release_file={self.os_release_file!r}, " "distro_release_file={self.distro_release_file!r}, " "include_lsb={self.include_lsb!r}, " "include_uname={self.include_uname!r}, " "include_oslevel={self.include_oslevel!r}, " "root_dir={self.root_dir!r}, " "_os_release_info={self._os_release_info!r}, " "_lsb_release_info={self._lsb_release_info!r}, " "_distro_release_info={self._distro_release_info!r}, " "_uname_info={self._uname_info!r}, " "_oslevel_info={self._oslevel_info!r})".format(self=self) ) def linux_distribution( self, full_distribution_name: bool = True ) -> Tuple[str, str, str]: """ Return information about the OS distribution that is compatible with Python's :func:`platform.linux_distribution`, supporting a subset of its parameters. For details, see :func:`distro.linux_distribution`. """ return ( self.name() if full_distribution_name else self.id(), self.version(), self._os_release_info.get("release_codename") or self.codename(), ) def id(self) -> str: """Return the distro ID of the OS distribution, as a string. For details, see :func:`distro.id`. """ def normalize(distro_id: str, table: Dict[str, str]) -> str: distro_id = distro_id.lower().replace(" ", "_") return table.get(distro_id, distro_id) distro_id = self.os_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_OS_ID) distro_id = self.lsb_release_attr("distributor_id") if distro_id: return normalize(distro_id, NORMALIZED_LSB_ID) distro_id = self.distro_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) distro_id = self.uname_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) return "" def name(self, pretty: bool = False) -> str: """ Return the name of the OS distribution, as a string. For details, see :func:`distro.name`. """ name = ( self.os_release_attr("name") or self.lsb_release_attr("distributor_id") or self.distro_release_attr("name") or self.uname_attr("name") ) if pretty: name = self.os_release_attr("pretty_name") or self.lsb_release_attr( "description" ) if not name: name = self.distro_release_attr("name") or self.uname_attr("name") version = self.version(pretty=True) if version: name = f"{name} {version}" return name or "" def version(self, pretty: bool = False, best: bool = False) -> str: """ Return the version of the OS distribution, as a string. For details, see :func:`distro.version`. 
""" versions = [ self.os_release_attr("version_id"), self.lsb_release_attr("release"), self.distro_release_attr("version_id"), self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( "version_id", "" ), self._parse_distro_release_content( self.lsb_release_attr("description") ).get("version_id", ""), self.uname_attr("release"), ] if self.uname_attr("id").startswith("aix"): # On AIX platforms, prefer oslevel command output. versions.insert(0, self.oslevel_info()) elif self.id() == "debian" or "debian" in self.like().split(): # On Debian-like, add debian_version file content to candidates list. versions.append(self._debian_version) version = "" if best: # This algorithm uses the last version in priority order that has # the best precision. If the versions are not in conflict, that # does not matter; otherwise, using the last one instead of the # first one might be considered a surprise. for v in versions: if v.count(".") > version.count(".") or version == "": version = v else: for v in versions: if v != "": version = v break if pretty and version and self.codename(): version = f"{version} ({self.codename()})" return version def version_parts(self, best: bool = False) -> Tuple[str, str, str]: """ Return the version of the OS distribution, as a tuple of version numbers. For details, see :func:`distro.version_parts`. """ version_str = self.version(best=best) if version_str: version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") matches = version_regex.match(version_str) if matches: major, minor, build_number = matches.groups() return major, minor or "", build_number or "" return "", "", "" def major_version(self, best: bool = False) -> str: """ Return the major version number of the current distribution. For details, see :func:`distro.major_version`. """ return self.version_parts(best)[0] def minor_version(self, best: bool = False) -> str: """ Return the minor version number of the current distribution. For details, see :func:`distro.minor_version`. """ return self.version_parts(best)[1] def build_number(self, best: bool = False) -> str: """ Return the build number of the current distribution. For details, see :func:`distro.build_number`. """ return self.version_parts(best)[2] def like(self) -> str: """ Return the IDs of distributions that are like the OS distribution. For details, see :func:`distro.like`. """ return self.os_release_attr("id_like") or "" def codename(self) -> str: """ Return the codename of the OS distribution. For details, see :func:`distro.codename`. """ try: # Handle os_release specially since distros might purposefully set # this to empty string to have no codename return self._os_release_info["codename"] except KeyError: return ( self.lsb_release_attr("codename") or self.distro_release_attr("codename") or "" ) def info(self, pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information about the OS distribution. For details, see :func:`distro.info`. """ return InfoDict( id=self.id(), version=self.version(pretty, best), version_parts=VersionDict( major=self.major_version(best), minor=self.minor_version(best), build_number=self.build_number(best), ), like=self.like(), codename=self.codename(), ) def os_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the OS distribution. For details, see :func:`distro.os_release_info`. 
""" return self._os_release_info def lsb_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the OS distribution. For details, see :func:`distro.lsb_release_info`. """ return self._lsb_release_info def distro_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_info`. """ return self._distro_release_info def uname_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the uname command data source of the OS distribution. For details, see :func:`distro.uname_info`. """ return self._uname_info def oslevel_info(self) -> str: """ Return AIX' oslevel command output. """ return self._oslevel_info def os_release_attr(self, attribute: str) -> str: """ Return a single named information item from the os-release file data source of the OS distribution. For details, see :func:`distro.os_release_attr`. """ return self._os_release_info.get(attribute, "") def lsb_release_attr(self, attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the OS distribution. For details, see :func:`distro.lsb_release_attr`. """ return self._lsb_release_info.get(attribute, "") def distro_release_attr(self, attribute: str) -> str: """ Return a single named information item from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_attr`. """ return self._distro_release_info.get(attribute, "") def uname_attr(self, attribute: str) -> str: """ Return a single named information item from the uname command output data source of the OS distribution. For details, see :func:`distro.uname_attr`. """ return self._uname_info.get(attribute, "") @cached_property def _os_release_info(self) -> Dict[str, str]: """ Get the information items from the specified os-release file. Returns: A dictionary containing all information items. """ if os.path.isfile(self.os_release_file): with open(self.os_release_file, encoding="utf-8") as release_file: return self._parse_os_release_content(release_file) return {} @staticmethod def _parse_os_release_content(lines: TextIO) -> Dict[str, str]: """ Parse the lines of an os-release file. Parameters: * lines: Iterable through the lines in the os-release file. Each line must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. """ props = {} lexer = shlex.shlex(lines, posix=True) lexer.whitespace_split = True tokens = list(lexer) for token in tokens: # At this point, all shell-like parsing has been done (i.e. 
# comments processed, quotes and backslash escape sequences # processed, multi-line values assembled, trailing newlines # stripped, etc.), so the tokens are now either: # * variable assignments: var=value # * commands or their arguments (not allowed in os-release) # Ignore any tokens that are not variable assignments if "=" in token: k, v = token.split("=", 1) props[k.lower()] = v if "version" in props: # extract release codename (if any) from version attribute match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"]) if match: release_codename = match.group(1) or match.group(2) props["codename"] = props["release_codename"] = release_codename if "version_codename" in props: # os-release added a version_codename field. Use that in # preference to anything else Note that some distros purposefully # do not have code names. They should be setting # version_codename="" props["codename"] = props["version_codename"] elif "ubuntu_codename" in props: # Same as above but a non-standard field name used on older Ubuntus props["codename"] = props["ubuntu_codename"] return props @cached_property def _lsb_release_info(self) -> Dict[str, str]: """ Get the information items from the lsb_release command output. Returns: A dictionary containing all information items. """ if not self.include_lsb: return {} try: cmd = ("lsb_release", "-a") stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) @staticmethod def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]: """ Parse the output of the lsb_release command. Parameters: * lines: Iterable through the lines of the lsb_release output. Each line must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. """ props = {} for line in lines: kv = line.strip("\n").split(":", 1) if len(kv) != 2: # Ignore lines without colon. continue k, v = kv props.update({k.replace(" ", "_").lower(): v.strip()}) return props @cached_property def _uname_info(self) -> Dict[str, str]: if not self.include_uname: return {} try: cmd = ("uname", "-rs") stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) except OSError: return {} content = self._to_str(stdout).splitlines() return self._parse_uname_content(content) @cached_property def _oslevel_info(self) -> str: if not self.include_oslevel: return "" try: stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL) except (OSError, subprocess.CalledProcessError): return "" return self._to_str(stdout).strip() @cached_property def _debian_version(self) -> str: try: with open( os.path.join(self.etc_dir, "debian_version"), encoding="ascii" ) as fp: return fp.readline().rstrip() except FileNotFoundError: return "" @staticmethod def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]: if not lines: return {} props = {} match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip()) if match: name, version = match.groups() # This is to prevent the Linux kernel version from # appearing as the 'best' version on otherwise # identifiable distributions. 
if name == "Linux": return {} props["id"] = name.lower() props["name"] = name props["release"] = version return props @staticmethod def _to_str(bytestring: bytes) -> str: encoding = sys.getfilesystemencoding() return bytestring.decode(encoding) @cached_property def _distro_release_info(self) -> Dict[str, str]: """ Get the information items from the specified distro release file. Returns: A dictionary containing all information items. """ if self.distro_release_file: # If it was specified, we use it and parse what we can, even if # its file name or content does not match the expected pattern. distro_info = self._parse_distro_release_file(self.distro_release_file) basename = os.path.basename(self.distro_release_file) # The file name pattern for user-specified distro release files # is somewhat more tolerant (compared to when searching for the # file), because we want to use what was specified as best as # possible. match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) else: try: basenames = [ basename for basename in os.listdir(self.etc_dir) if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES and os.path.isfile(os.path.join(self.etc_dir, basename)) ] # We sort for repeatability in cases where there are multiple # distro specific files; e.g. CentOS, Oracle, Enterprise all # containing `redhat-release` on top of their own. basenames.sort() except OSError: # This may occur when /etc is not readable but we can't be # sure about the *-release files. Check common entries of # /etc for information. If they turn out to not be there the # error is handled in `_parse_distro_release_file()`. basenames = _DISTRO_RELEASE_BASENAMES for basename in basenames: match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) if match is None: continue filepath = os.path.join(self.etc_dir, basename) distro_info = self._parse_distro_release_file(filepath) # The name is always present if the pattern matches. if "name" not in distro_info: continue self.distro_release_file = filepath break else: # the loop didn't "break": no candidate. return {} if match is not None: distro_info["id"] = match.group(1) # CloudLinux < 7: manually enrich info with proper id. if "cloudlinux" in distro_info.get("name", "").lower(): distro_info["id"] = "cloudlinux" return distro_info def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]: """ Parse a distro release file. Parameters: * filepath: Path name of the distro release file. Returns: A dictionary containing all information items. """ try: with open(filepath, encoding="utf-8") as fp: # Only parse the first line. For instance, on SLES there # are multiple lines. We don't want them... return self._parse_distro_release_content(fp.readline()) except OSError: # Ignore not being able to read a specific, seemingly version # related file. # See https://github.com/python-distro/distro/issues/162 return {} @staticmethod def _parse_distro_release_content(line: str) -> Dict[str, str]: """ Parse a line from a distro release file. Parameters: * line: Line from the distro release file. Must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. 
""" matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) distro_info = {} if matches: # regexp ensures non-None distro_info["name"] = matches.group(3)[::-1] if matches.group(2): distro_info["version_id"] = matches.group(2)[::-1] if matches.group(1): distro_info["codename"] = matches.group(1)[::-1] elif line: distro_info["name"] = line.strip() return distro_info _distro = LinuxDistribution() def main() -> None: logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) parser = argparse.ArgumentParser(description="OS distro info tool") parser.add_argument( "--json", "-j", help="Output in machine readable format", action="store_true" ) parser.add_argument( "--root-dir", "-r", type=str, dest="root_dir", help="Path to the root filesystem directory (defaults to /)", ) args = parser.parse_args() if args.root_dir: dist = LinuxDistribution( include_lsb=False, include_uname=False, include_oslevel=False, root_dir=args.root_dir, ) else: dist = _distro if args.json: logger.info(json.dumps(dist.info(), indent=4, sort_keys=True)) else: logger.info("Name: %s", dist.name(pretty=True)) distribution_version = dist.version(pretty=True) logger.info("Version: %s", distribution_version) distribution_codename = dist.codename() logger.info("Codename: %s", distribution_codename) if __name__ == "__main__": main()
49,584
Python
.py
1,133
35.551633
88
0.635013
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
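A quick worked example of the reversed-string parsing shown in _parse_distro_release_content above (a static method, so it can be called without building a LinuxDistribution; the import relies on the compat shim in the next record, and the sample line is illustrative):

from ansible.module_utils import distro

# Reversing the line lets one left-anchored regex peel "(codename)",
# version and name off the end, regardless of how long the name is.
print(distro.LinuxDistribution._parse_distro_release_content(
    "CentOS Linux release 7.9.2009 (Core)"))
# -> {'name': 'CentOS Linux', 'version_id': '7.9.2009', 'codename': 'Core'}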
14,238
__init__.py
ansible_ansible/lib/ansible/module_utils/distro/__init__.py
# (c) 2018 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

"""
Compat distro library.
"""
from __future__ import annotations

# The following makes it easier for us to script updates of the bundled code
_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.9.0"}

# The following additional changes have been made:
# * Remove optparse since it is not needed for our use.
# * A format string including {} has been changed to {0} (py2.6 compat)
# * Port two calls from subprocess.check_output to subprocess.Popen().communicate() (py2.6 compat)

import sys
import types

try:
    import distro as _system_distro
except ImportError:
    _system_distro = None
else:
    # There could be a 'distro' package/module that isn't what we expect, on the
    # PYTHONPATH. Rather than erroring out in this case, just fall back to ours.
    # We require more functions than distro.id(), but this is probably a decent
    # test that we have something we can reasonably use.
    if not hasattr(_system_distro, 'id') or \
            not isinstance(_system_distro.id, types.FunctionType):
        _system_distro = None

if _system_distro:
    distro = _system_distro
else:
    # Our bundled copy
    from ansible.module_utils.distro import _distro as distro

sys.modules['ansible.module_utils.distro'] = distro
1,943
Python
.py
46
39.847826
98
0.748941
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
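Because of the sys.modules assignment above, callers import the package and get either the system distro or the bundled copy transparently. A minimal usage sketch (output depends on the host; assumes a Linux box with the ansible tree importable):

from ansible.module_utils import distro

print(distro.id())       # e.g. 'ubuntu'
print(distro.version())  # e.g. '22.04'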
14,239
_daemon_threading.py
ansible_ansible/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
"""Proxy stdlib threading module that only supports non-joinable daemon threads.""" # NB: all new local module attrs are _ prefixed to ensure an identical public attribute surface area to the module we're proxying from __future__ import annotations as _annotations import threading as _threading import typing as _t class _DaemonThread(_threading.Thread): """ Daemon-only Thread subclass; prevents running threads of this type from blocking interpreter shutdown and process exit. The join() method is a no-op. """ def __init__(self, *args, daemon: bool | None = None, **kwargs) -> None: super().__init__(*args, daemon=daemon or True, **kwargs) def join(self, timeout=None) -> None: """ThreadPoolExecutor's atexit handler joins all queue threads before allowing shutdown; prevent them from blocking.""" Thread = _DaemonThread # shadow the real Thread attr with our _DaemonThread def __getattr__(name: str) -> _t.Any: """Delegate anything not defined locally to the real `threading` module.""" return getattr(_threading, name)
1,084
Python
.py
18
56
129
0.729167
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
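A short sketch of how the proxy behaves (editor-added; assumes the ansible tree is importable). Only Thread is overridden; everything else falls through to the stdlib via the module-level __getattr__ (PEP 562):

from ansible.module_utils._internal._concurrent import _daemon_threading as threading

t = threading.Thread(target=print, args=("runs as a daemon thread",))
t.start()
t.join()                         # no-op by design; never blocks
print(threading.active_count())  # delegated to the real stdlib threading module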
14,240
_futures.py
ansible_ansible/lib/ansible/module_utils/_internal/_concurrent/_futures.py
"""Utilities for concurrent code execution using futures.""" from __future__ import annotations import concurrent.futures import types from . import _daemon_threading class DaemonThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor): """ThreadPoolExecutor subclass that creates non-joinable daemon threads for non-blocking pool and process shutdown with abandoned threads.""" atc = concurrent.futures.ThreadPoolExecutor._adjust_thread_count # clone the base class `_adjust_thread_count` method with a copy of its globals dict _adjust_thread_count = types.FunctionType(atc.__code__, atc.__globals__.copy(), name=atc.__name__, argdefs=atc.__defaults__, closure=atc.__closure__) # patch the method closure's `threading` module import to use our daemon-only thread factory instead _adjust_thread_count.__globals__.update(threading=_daemon_threading) del atc # don't expose this as a class attribute
936
Python
.py
13
68.230769
153
0.772678
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
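Intended use, per the docstring above (a minimal sketch; assumes the ansible tree is importable): work submitted here runs on daemon threads cloned via the patched _adjust_thread_count, so a hung task cannot block interpreter shutdown.

from ansible.module_utils._internal._concurrent._futures import DaemonThreadPoolExecutor

with DaemonThreadPoolExecutor(max_workers=2) as pool:
    future = pool.submit(sum, range(10))
    print(future.result())  # 45; shutdown at the end of the block cannot hang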
14,241
default_collectors.py
ansible_ansible/lib/ansible/module_utils/facts/default_collectors.py
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector
from ansible.module_utils.facts.other.facter import FacterFactCollector
from ansible.module_utils.facts.other.ohai import OhaiFactCollector

from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
from ansible.module_utils.facts.system.chroot import ChrootFactCollector
from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector
from ansible.module_utils.facts.system.distribution import DistributionFactCollector
from ansible.module_utils.facts.system.date_time import DateTimeFactCollector
from ansible.module_utils.facts.system.env import EnvFactCollector
from ansible.module_utils.facts.system.dns import DnsFactCollector
from ansible.module_utils.facts.system.fips import FipsFactCollector
from ansible.module_utils.facts.system.loadavg import LoadAvgFactCollector
from ansible.module_utils.facts.system.local import LocalFactCollector
from ansible.module_utils.facts.system.lsb import LSBFactCollector
from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector
from ansible.module_utils.facts.system.pkg_mgr import OpenBSDPkgMgrFactCollector
from ansible.module_utils.facts.system.platform import PlatformFactCollector
from ansible.module_utils.facts.system.python import PythonFactCollector
from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector
from ansible.module_utils.facts.system.systemd import SystemdFactCollector
from ansible.module_utils.facts.system.user import UserFactCollector

from ansible.module_utils.facts.hardware.base import HardwareCollector
from ansible.module_utils.facts.hardware.aix import AIXHardwareCollector
from ansible.module_utils.facts.hardware.darwin import DarwinHardwareCollector
from ansible.module_utils.facts.hardware.dragonfly import DragonFlyHardwareCollector
from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardwareCollector
from ansible.module_utils.facts.hardware.hpux import HPUXHardwareCollector
from ansible.module_utils.facts.hardware.hurd import HurdHardwareCollector
from ansible.module_utils.facts.hardware.linux import LinuxHardwareCollector
from ansible.module_utils.facts.hardware.netbsd import NetBSDHardwareCollector
from ansible.module_utils.facts.hardware.openbsd import OpenBSDHardwareCollector
from ansible.module_utils.facts.hardware.sunos import SunOSHardwareCollector

from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.aix import AIXNetworkCollector
from ansible.module_utils.facts.network.darwin import DarwinNetworkCollector
from ansible.module_utils.facts.network.dragonfly import DragonFlyNetworkCollector
from ansible.module_utils.facts.network.fc_wwn import FcWwnInitiatorFactCollector
from ansible.module_utils.facts.network.freebsd import FreeBSDNetworkCollector
from ansible.module_utils.facts.network.hpux import HPUXNetworkCollector
from ansible.module_utils.facts.network.hurd import HurdNetworkCollector
from ansible.module_utils.facts.network.linux import LinuxNetworkCollector
from ansible.module_utils.facts.network.iscsi import IscsiInitiatorNetworkCollector
from ansible.module_utils.facts.network.nvme import NvmeInitiatorNetworkCollector
from ansible.module_utils.facts.network.netbsd import NetBSDNetworkCollector
from ansible.module_utils.facts.network.openbsd import OpenBSDNetworkCollector
from ansible.module_utils.facts.network.sunos import SunOSNetworkCollector

from ansible.module_utils.facts.virtual.base import VirtualCollector
from ansible.module_utils.facts.virtual.dragonfly import DragonFlyVirtualCollector
from ansible.module_utils.facts.virtual.freebsd import FreeBSDVirtualCollector
from ansible.module_utils.facts.virtual.hpux import HPUXVirtualCollector
from ansible.module_utils.facts.virtual.linux import LinuxVirtualCollector
from ansible.module_utils.facts.virtual.netbsd import NetBSDVirtualCollector
from ansible.module_utils.facts.virtual.openbsd import OpenBSDVirtualCollector
from ansible.module_utils.facts.virtual.sunos import SunOSVirtualCollector

# these should always be first due to most other facts depending on them
_base = [
    PlatformFactCollector,
    DistributionFactCollector,
    LSBFactCollector
]  # type: t.List[t.Type[BaseFactCollector]]

# These restrict what is possible in others
_restrictive = [
    SelinuxFactCollector,
    ApparmorFactCollector,
    ChrootFactCollector,
    FipsFactCollector
]  # type: t.List[t.Type[BaseFactCollector]]

# general info, not required but probably useful for other facts
_general = [
    PythonFactCollector,
    SystemCapabilitiesFactCollector,
    PkgMgrFactCollector,
    OpenBSDPkgMgrFactCollector,
    ServiceMgrFactCollector,
    CmdLineFactCollector,
    DateTimeFactCollector,
    EnvFactCollector,
    LoadAvgFactCollector,
    SshPubKeyFactCollector,
    UserFactCollector,
    SystemdFactCollector
]  # type: t.List[t.Type[BaseFactCollector]]

# virtual, this might also limit hardware/networking
_virtual = [
    VirtualCollector,
    DragonFlyVirtualCollector,
    FreeBSDVirtualCollector,
    LinuxVirtualCollector,
    OpenBSDVirtualCollector,
    NetBSDVirtualCollector,
    SunOSVirtualCollector,
    HPUXVirtualCollector
]  # type: t.List[t.Type[BaseFactCollector]]

_hardware = [
    HardwareCollector,
    AIXHardwareCollector,
    DarwinHardwareCollector,
    DragonFlyHardwareCollector,
    FreeBSDHardwareCollector,
    HPUXHardwareCollector,
    HurdHardwareCollector,
    LinuxHardwareCollector,
    NetBSDHardwareCollector,
    OpenBSDHardwareCollector,
    SunOSHardwareCollector
]  # type: t.List[t.Type[BaseFactCollector]]

_network = [
    DnsFactCollector,
    FcWwnInitiatorFactCollector,
    NetworkCollector,
    AIXNetworkCollector,
    DarwinNetworkCollector,
    DragonFlyNetworkCollector,
    FreeBSDNetworkCollector,
    HPUXNetworkCollector,
    HurdNetworkCollector,
    IscsiInitiatorNetworkCollector,
    NvmeInitiatorNetworkCollector,
    LinuxNetworkCollector,
    NetBSDNetworkCollector,
    OpenBSDNetworkCollector,
    SunOSNetworkCollector
]  # type: t.List[t.Type[BaseFactCollector]]

# other fact sources
_extra_facts = [
    LocalFactCollector,
    FacterFactCollector,
    OhaiFactCollector
]  # type: t.List[t.Type[BaseFactCollector]]

# TODO: make config driven
collectors = _base + _restrictive + _general + _virtual + _hardware + _network + _extra_facts
8,376
Python
.py
163
48.920245
93
0.841913
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
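Since the module above is a flat registry, it can be inspected directly; a small sketch (editor-added; counts depend on the ansible version):

from ansible.module_utils.facts import default_collectors

names = [c.name for c in default_collectors.collectors]
# the _base collectors come first, matching the ordering comment above
print(names[:3])  # ['platform', 'distribution', 'lsb']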
14,242
collector.py
ansible_ansible/lib/ansible/module_utils/facts/collector.py
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations

from collections import defaultdict

import platform

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts import timeout


class CycleFoundInFactDeps(Exception):
    """Indicates there is a cycle in fact collector deps

    If collector-B requires collector-A, and collector-A requires
    collector-B, that is a cycle. In that case, there is no ordering
    that will satisfy B before A and A before B. That will cause this
    error to be raised.
    """
    pass


class UnresolvedFactDep(ValueError):
    pass


class CollectorNotFoundError(KeyError):
    pass


class BaseFactCollector:
    _fact_ids = set()  # type: t.Set[str]

    _platform = 'Generic'
    name = None  # type: str | None
    required_facts = set()  # type: t.Set[str]

    def __init__(self, collectors=None, namespace=None):
        """Base class for things that collect facts.

        'collectors' is an optional list of other FactCollectors for composing."""
        self.collectors = collectors or []

        # self.namespace is an object with a 'transform' method that transforms
        # the name to indicate the namespace (ie, adds a prefix or suffix).
        self.namespace = namespace

        self.fact_ids = set([self.name])
        self.fact_ids.update(self._fact_ids)

    @classmethod
    def platform_match(cls, platform_info):
        if platform_info.get('system', None) == cls._platform:
            return cls
        return None

    def _transform_name(self, key_name):
        if self.namespace:
            return self.namespace.transform(key_name)
        return key_name

    def _transform_dict_keys(self, fact_dict):
        """update a dicts keys to use new names as transformed by self._transform_name"""

        if fact_dict is None:
            return {}
        for old_key in list(fact_dict.keys()):
            new_key = self._transform_name(old_key)
            # pop the item by old_key and replace it using new_key
            fact_dict[new_key] = fact_dict.pop(old_key)
        return fact_dict

    # TODO/MAYBE: rename to 'collect' and add 'collect_without_namespace'
    def collect_with_namespace(self, module=None, collected_facts=None):
        # collect, then transform the key names if needed
        facts_dict = self.collect(module=module, collected_facts=collected_facts)
        if self.namespace:
            facts_dict = self._transform_dict_keys(facts_dict)
        return facts_dict

    def collect(self, module=None, collected_facts=None):
        """do the fact collection

        'collected_facts' is an object (a dict, likely) that holds all previously
        collected facts. This is intended to be used if a FactCollector needs to
        reference another fact (for ex, the system arch) and should not be
        modified (usually).

        Returns a dict of facts.
        """
        facts_dict = {}
        return facts_dict


def get_collector_names(valid_subsets=None,
                        minimal_gather_subset=None,
                        gather_subset=None,
                        aliases_map=None,
                        platform_info=None):
    """return a set of FactCollector names based on gather_subset spec.

    gather_subset is a spec describing which facts to gather.
    valid_subsets is a frozenset of potential matches for gather_subset ('all', 'network') etc
    minimal_gather_subset is a frozenset of matches to always use, even for gather_subset='!all'
    """

    # Retrieve module parameters
    gather_subset = gather_subset or ['all']

    # the list of everything that 'all' expands to
    valid_subsets = valid_subsets or frozenset()

    # if provided, minimal_gather_subset is always added, even after all negations
    minimal_gather_subset = minimal_gather_subset or frozenset()

    aliases_map = aliases_map or defaultdict(set)

    # Retrieve all facts elements
    additional_subsets = set()
    exclude_subsets = set()

    # total always starts with the min set, then
    # adds of the additions in gather_subset, then
    # excludes all of the excludes, then add any explicitly
    # requested subsets.
    gather_subset_with_min = ['min']
    gather_subset_with_min.extend(gather_subset)

    # subsets we mention in gather_subset explicitly, except for 'all'/'min'
    explicitly_added = set()

    for subset in gather_subset_with_min:
        subset_id = subset
        if subset_id == 'min':
            additional_subsets.update(minimal_gather_subset)
            continue
        if subset_id == 'all':
            additional_subsets.update(valid_subsets)
            continue
        if subset_id.startswith('!'):
            subset = subset[1:]
            if subset == 'min':
                exclude_subsets.update(minimal_gather_subset)
                continue
            if subset == 'all':
                exclude_subsets.update(valid_subsets - minimal_gather_subset)
                continue
            exclude = True
        else:
            exclude = False

        if exclude:
            # include 'devices', 'dmi' etc for '!hardware'
            exclude_subsets.update(aliases_map.get(subset, set()))
            exclude_subsets.add(subset)
        else:
            # NOTE: this only considers adding an unknown gather subset an error. Asking to
            #       exclude an unknown gather subset is ignored.
            if subset_id not in valid_subsets:
                raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" %
                                (subset, ", ".join(sorted(valid_subsets))))

            explicitly_added.add(subset)
            additional_subsets.add(subset)

    if not additional_subsets:
        additional_subsets.update(valid_subsets)

    additional_subsets.difference_update(exclude_subsets - explicitly_added)

    return additional_subsets


def find_collectors_for_platform(all_collector_classes, compat_platforms):
    found_collectors = set()
    found_collectors_names = set()

    # start from specific platform, then try generic
    for compat_platform in compat_platforms:
        platform_match = None
        for all_collector_class in all_collector_classes:

            # ask the class if it is compatible with the platform info
            platform_match = all_collector_class.platform_match(compat_platform)

            if not platform_match:
                continue

            primary_name = all_collector_class.name

            if primary_name not in found_collectors_names:
                found_collectors.add(all_collector_class)
                found_collectors_names.add(all_collector_class.name)

    return found_collectors


def build_fact_id_to_collector_map(collectors_for_platform):
    fact_id_to_collector_map = defaultdict(list)
    aliases_map = defaultdict(set)

    for collector_class in collectors_for_platform:
        primary_name = collector_class.name

        fact_id_to_collector_map[primary_name].append(collector_class)

        for fact_id in collector_class._fact_ids:
            fact_id_to_collector_map[fact_id].append(collector_class)
            aliases_map[primary_name].add(fact_id)

    return fact_id_to_collector_map, aliases_map


def select_collector_classes(collector_names, all_fact_subsets):
    seen_collector_classes = set()

    selected_collector_classes = []

    for collector_name in collector_names:
        collector_classes = all_fact_subsets.get(collector_name, [])
        for collector_class in collector_classes:
            if collector_class not in seen_collector_classes:
                selected_collector_classes.append(collector_class)
                seen_collector_classes.add(collector_class)

    return selected_collector_classes


def _get_requires_by_collector_name(collector_name, all_fact_subsets):
    required_facts = set()

    try:
        collector_classes = all_fact_subsets[collector_name]
    except KeyError:
        raise CollectorNotFoundError('Fact collector "%s" not found' % collector_name)
    for collector_class in collector_classes:
        required_facts.update(collector_class.required_facts)
    return required_facts


def find_unresolved_requires(collector_names, all_fact_subsets):
    """Find any collector names that have unresolved requires

    Returns a list of collector names that correspond to collector
    classes whose .required_facts are not in collector_names.
    """
    unresolved = set()

    for collector_name in collector_names:
        required_facts = _get_requires_by_collector_name(collector_name, all_fact_subsets)
        for required_fact in required_facts:
            if required_fact not in collector_names:
                unresolved.add(required_fact)

    return unresolved


def resolve_requires(unresolved_requires, all_fact_subsets):
    new_names = set()
    failed = []
    for unresolved in unresolved_requires:
        if unresolved in all_fact_subsets:
            new_names.add(unresolved)
        else:
            failed.append(unresolved)

    if failed:
        raise UnresolvedFactDep('unresolved fact dep %s' % ','.join(failed))
    return new_names


def build_dep_data(collector_names, all_fact_subsets):
    dep_map = defaultdict(set)
    for collector_name in collector_names:
        collector_deps = set()
        for collector in all_fact_subsets[collector_name]:
            for dep in collector.required_facts:
                collector_deps.add(dep)
        dep_map[collector_name] = collector_deps
    return dep_map


def tsort(dep_map):
    sorted_list = []

    unsorted_map = dep_map.copy()

    while unsorted_map:
        acyclic = False
        for node, edges in list(unsorted_map.items()):
            for edge in edges:
                if edge in unsorted_map:
                    break
            else:
                acyclic = True
                del unsorted_map[node]
                sorted_list.append((node, edges))

        if not acyclic:
            raise CycleFoundInFactDeps('Unable to tsort deps, there was a cycle in the graph. sorted=%s' % sorted_list)

    return sorted_list


def _solve_deps(collector_names, all_fact_subsets):
    unresolved = collector_names.copy()
    solutions = collector_names.copy()

    while True:
        unresolved = find_unresolved_requires(solutions, all_fact_subsets)
        if unresolved == set():
            break

        new_names = resolve_requires(unresolved, all_fact_subsets)
        solutions.update(new_names)

    return solutions


def collector_classes_from_gather_subset(all_collector_classes=None,
                                         valid_subsets=None,
                                         minimal_gather_subset=None,
                                         gather_subset=None,
                                         gather_timeout=None,
                                         platform_info=None):
    """return a list of collector classes that match the args"""

    # use gather_name etc to get the list of collectors

    all_collector_classes = all_collector_classes or []

    minimal_gather_subset = minimal_gather_subset or frozenset()

    platform_info = platform_info or {'system': platform.system()}

    gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT

    # tweak the modules GATHER_TIMEOUT
    timeout.GATHER_TIMEOUT = gather_timeout

    valid_subsets = valid_subsets or frozenset()

    # maps alias names like 'hardware' to the list of names that are part of hardware
    # like 'devices' and 'dmi'
    aliases_map = defaultdict(set)

    compat_platforms = [platform_info, {'system': 'Generic'}]

    collectors_for_platform = find_collectors_for_platform(all_collector_classes, compat_platforms)

    # all_fact_subsets maps the subset name ('hardware') to the class that provides it.

    # TODO: name collisions here? are there facts with the same name as a gather_subset (all, network, hardware, virtual, ohai, facter)
    all_fact_subsets, aliases_map = build_fact_id_to_collector_map(collectors_for_platform)

    all_valid_subsets = frozenset(all_fact_subsets.keys())

    # expand any fact_id/collectorname/gather_subset term ('all', 'env', etc) to the list of names that represents
    collector_names = get_collector_names(valid_subsets=all_valid_subsets,
                                          minimal_gather_subset=minimal_gather_subset,
                                          gather_subset=gather_subset,
                                          aliases_map=aliases_map,
                                          platform_info=platform_info)

    complete_collector_names = _solve_deps(collector_names, all_fact_subsets)

    dep_map = build_dep_data(complete_collector_names, all_fact_subsets)

    ordered_deps = tsort(dep_map)

    ordered_collector_names = [x[0] for x in ordered_deps]

    selected_collector_classes = select_collector_classes(ordered_collector_names, all_fact_subsets)

    return selected_collector_classes
14,716
Python
.py
295
40.820339
135
0.671348
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
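The dependency machinery above operates on plain dicts and sets, so tsort() can be exercised standalone; a worked example (editor-added, data is illustrative):

from ansible.module_utils.facts.collector import tsort

dep_map = {
    'network': {'platform', 'distribution'},
    'platform': set(),
    'distribution': {'platform'},
}
# nodes are emitted only once everything they depend on has been emitted
print([name for name, deps in tsort(dep_map)])
# ['platform', 'distribution', 'network']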
14,243
utils.py
ansible_ansible/lib/ansible/module_utils/facts/utils.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import fcntl
import os


def get_file_content(path, default=None, strip=True):
    """
    Return the contents of a given file path

    :args path: path to file to return contents from
    :args default: value to return if we could not read file
    :args strip: controls if we strip whitespace from the result or not

    :returns: String with file contents (optionally stripped) or 'default' value
    """
    data = default
    if os.path.exists(path) and os.access(path, os.R_OK):
        datafile = None
        try:
            datafile = open(path)
            try:
                # try to not enter kernel 'block' mode, which prevents timeouts
                fd = datafile.fileno()
                flag = fcntl.fcntl(fd, fcntl.F_GETFL)
                fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
            except Exception:
                pass  # not required to operate, but would have been nice!

            # actually read the data
            data = datafile.read()

            if strip:
                data = data.strip()

            if len(data) == 0:
                data = default

        except Exception:
            # ignore errors as some jails/containers might have readable permissions but not allow reads
            pass
        finally:
            if datafile is not None:
                datafile.close()

    return data


def get_file_lines(path, strip=True, line_sep=None):
    """get list of lines from file"""
    data = get_file_content(path, strip=strip)
    if data:
        if line_sep is None:
            ret = data.splitlines()
        else:
            if len(line_sep) == 1:
                ret = data.rstrip(line_sep).split(line_sep)
            else:
                ret = data.split(line_sep)
    else:
        ret = []
    return ret


def get_mount_size(mountpoint):
    mount_size = {}

    try:
        statvfs_result = os.statvfs(mountpoint)
        mount_size['size_total'] = statvfs_result.f_frsize * statvfs_result.f_blocks
        mount_size['size_available'] = statvfs_result.f_frsize * (statvfs_result.f_bavail)

        # Block total/available/used
        mount_size['block_size'] = statvfs_result.f_bsize
        mount_size['block_total'] = statvfs_result.f_blocks
        mount_size['block_available'] = statvfs_result.f_bavail
        mount_size['block_used'] = mount_size['block_total'] - mount_size['block_available']

        # Inode total/available/used
        mount_size['inode_total'] = statvfs_result.f_files
        mount_size['inode_available'] = statvfs_result.f_favail
        mount_size['inode_used'] = mount_size['inode_total'] - mount_size['inode_available']
    except OSError:
        pass

    return mount_size
3,419
Python
.py
82
33.487805
104
0.636829
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
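Both helpers above fail soft, returning the default (or an empty dict) instead of raising; a quick usage sketch (editor-added; the paths are illustrative and Linux-specific):

from ansible.module_utils.facts.utils import get_file_content, get_mount_size

print(get_file_content('/proc/sys/kernel/hostname', default='unknown'))
print(get_mount_size('/'))  # dict of size_*/block_*/inode_* keys, or {} on error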
14,244
namespace.py
ansible_ansible/lib/ansible/module_utils/facts/namespace.py
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations


class FactNamespace:
    def __init__(self, namespace_name):
        self.namespace_name = namespace_name

    def transform(self, name):
        """Take a text name, and transforms it as needed (add a namespace prefix, etc)"""
        return name

    def _underscore(self, name):
        return name.replace('-', '_')


class PrefixFactNamespace(FactNamespace):
    def __init__(self, namespace_name, prefix=None):
        super(PrefixFactNamespace, self).__init__(namespace_name)
        self.prefix = prefix

    def transform(self, name):
        new_name = self._underscore(name)
        return '%s%s' % (self.prefix, new_name)
2,313
Python
.py
43
50.674419
92
0.752983
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
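An example of the transform contract above, mirroring how the fact machinery builds 'ansible_'-prefixed keys (editor-added):

from ansible.module_utils.facts.namespace import PrefixFactNamespace

ns = PrefixFactNamespace(namespace_name='ansible', prefix='ansible_')
# dashes become underscores, then the prefix is applied
print(ns.transform('default-ipv4'))  # 'ansible_default_ipv4'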
14,245
__init__.py
ansible_ansible/lib/ansible/module_utils/facts/__init__.py
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations

# import from the compat api because 2.0-2.3 had a module_utils.facts.ansible_facts
# and get_all_facts in top level namespace
from ansible.module_utils.facts.compat import ansible_facts, get_all_facts  # noqa
1,890
Python
.py
31
59.903226
92
0.790522
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,246
ansible_collector.py
ansible_ansible/lib/ansible/module_utils/facts/ansible_collector.py
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations

import fnmatch
import sys

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts import timeout
from ansible.module_utils.facts import collector
from ansible.module_utils.common.collections import is_string


class AnsibleFactCollector(collector.BaseFactCollector):
    """A FactCollector that returns results under 'ansible_facts' top level key.

    If a namespace is provided, facts will be collected under that namespace.
    For ex, an ansible.module_utils.facts.namespace.PrefixFactNamespace(prefix='ansible_')

    Has a 'from_gather_subset()' constructor that populates collectors based on a
    gather_subset specifier."""

    def __init__(self, collectors=None, namespace=None, filter_spec=None):

        super(AnsibleFactCollector, self).__init__(collectors=collectors,
                                                   namespace=namespace)

        self.filter_spec = filter_spec

    def _filter(self, facts_dict, filter_spec):
        # assume filter_spec='' or filter_spec=[] is equivalent to filter_spec='*'
        if not filter_spec or filter_spec == '*':
            return facts_dict

        if is_string(filter_spec):
            filter_spec = [filter_spec]

        found = []
        for f in filter_spec:
            for x, y in facts_dict.items():
                if not f or fnmatch.fnmatch(x, f):
                    found.append((x, y))
                elif not f.startswith(('ansible_', 'facter', 'ohai')):
                    # try to match with ansible_ prefix added when non empty
                    g = 'ansible_%s' % f
                    if fnmatch.fnmatch(x, g):
                        found.append((x, y))
        return found

    def collect(self, module=None, collected_facts=None):
        collected_facts = collected_facts or {}

        facts_dict = {}

        for collector_obj in self.collectors:
            info_dict = {}

            try:
                # Note: this collects with namespaces, so collected_facts also includes namespaces
                info_dict = collector_obj.collect_with_namespace(module=module,
                                                                 collected_facts=collected_facts)
            except Exception as e:
                sys.stderr.write(repr(e))
                sys.stderr.write('\n')

            # shallow copy of the new facts to pass to each collector in collected_facts so facts
            # can reference other facts they depend on.
            collected_facts.update(info_dict.copy())

            # NOTE: If we want complicated fact dict merging, this is where it would hook in
            facts_dict.update(self._filter(info_dict, self.filter_spec))

        return facts_dict


class CollectorMetaDataCollector(collector.BaseFactCollector):
    """Collector that provides a fact with the gather_subset metadata."""

    name = 'gather_subset'
    _fact_ids = set()  # type: t.Set[str]

    def __init__(self, collectors=None, namespace=None, gather_subset=None, module_setup=None):
        super(CollectorMetaDataCollector, self).__init__(collectors, namespace)
        self.gather_subset = gather_subset
        self.module_setup = module_setup

    def collect(self, module=None, collected_facts=None):
        meta_facts = {'gather_subset': self.gather_subset}
        if self.module_setup:
            meta_facts['module_setup'] = self.module_setup
        return meta_facts


def get_ansible_collector(all_collector_classes,
                          namespace=None,
                          filter_spec=None,
                          gather_subset=None,
                          gather_timeout=None,
                          minimal_gather_subset=None):

    filter_spec = filter_spec or []
    gather_subset = gather_subset or ['all']
    gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT
    minimal_gather_subset = minimal_gather_subset or frozenset()

    collector_classes = \
        collector.collector_classes_from_gather_subset(
            all_collector_classes=all_collector_classes,
            minimal_gather_subset=minimal_gather_subset,
            gather_subset=gather_subset,
            gather_timeout=gather_timeout)

    collectors = []
    for collector_class in collector_classes:
        collector_obj = collector_class(namespace=namespace)
        collectors.append(collector_obj)

    # Add a collector that knows what gather_subset we used so it can provide a fact
    collector_meta_data_collector = \
        CollectorMetaDataCollector(gather_subset=gather_subset,
                                   module_setup=True)
    collectors.append(collector_meta_data_collector)

    fact_collector = \
        AnsibleFactCollector(collectors=collectors,
                             filter_spec=filter_spec,
                             namespace=namespace)

    return fact_collector
6,566
Python
.py
122
44.114754
98
0.669371
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
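A small sketch of the _filter() matching rules above (editor-added; calls a private method purely for illustration, with a hand-built facts dict):

from ansible.module_utils.facts.ansible_collector import AnsibleFactCollector

afc = AnsibleFactCollector(collectors=[])
facts = {'ansible_env': {}, 'ansible_fips': False, 'gather_subset': ['all']}

# a bare name is retried with the 'ansible_' prefix added
print(afc._filter(facts, 'env'))  # [('ansible_env', {})]
# '*' (or an empty spec) short-circuits and returns the whole dict
print(afc._filter(facts, '*') is facts)  # True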
14,247
packages.py
ansible_ansible/lib/ansible/module_utils/facts/packages.py
# (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from abc import ABCMeta, abstractmethod

from ansible.module_utils.six import with_metaclass
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.common._utils import get_all_subclasses


def get_all_pkg_managers():

    return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr, RespawningLibMgr)}


class PkgMgr(with_metaclass(ABCMeta, object)):  # type: ignore[misc]

    @abstractmethod
    def is_available(self, handle_exceptions):
        # This method is supposed to return True/False if the package manager is currently installed/usable
        # It can also 'prep' the required systems in the process of detecting availability
        # If handle_exceptions is false it should raise exceptions related to manager discovery instead of handling them.
        pass

    @abstractmethod
    def list_installed(self):
        # This method should return a list of installed packages, each list item will be passed to get_package_details
        pass

    @abstractmethod
    def get_package_details(self, package):
        # This takes a 'package' item and returns a dictionary with the package information, name and version are minimal requirements
        pass

    def get_packages(self):
        # Take all of the above and return a dictionary of lists of dictionaries (package = list of installed versions)

        installed_packages = {}
        for package in self.list_installed():
            package_details = self.get_package_details(package)
            if 'source' not in package_details:
                package_details['source'] = self.__class__.__name__.lower()
            name = package_details['name']
            if name not in installed_packages:
                installed_packages[name] = [package_details]
            else:
                installed_packages[name].append(package_details)
        return installed_packages


class LibMgr(PkgMgr):

    LIB = None  # type: str | None

    def __init__(self):

        self._lib = None
        super(LibMgr, self).__init__()

    def is_available(self, handle_exceptions=True):
        found = False
        try:
            self._lib = __import__(self.LIB)
            found = True
        except ImportError:
            if not handle_exceptions:
                raise Exception(missing_required_lib(self.LIB))
        return found


class RespawningLibMgr(LibMgr):

    CLI_BINARIES = []  # type: t.List[str]
    INTERPRETERS = ['/usr/bin/python3']

    def is_available(self, handle_exceptions=True):
        if super(RespawningLibMgr, self).is_available():
            return True

        for binary in self.CLI_BINARIES:
            try:
                bin_path = get_bin_path(binary)
            except ValueError:
                # Not an interesting exception to raise, just a speculative probe
                continue
            else:
                # It looks like this package manager is installed
                if not has_respawned():
                    # See if respawning will help
                    interpreter_path = probe_interpreters_for_module(self.INTERPRETERS, self.LIB)
                    if interpreter_path:
                        respawn_module(interpreter_path)
                        # The module will exit when the respawned copy completes

                if not handle_exceptions:
                    raise Exception(f'Found executable at {bin_path}. {missing_required_lib(self.LIB)}')

        if not handle_exceptions:
            raise Exception(missing_required_lib(self.LIB))

        return False


class CLIMgr(PkgMgr):

    CLI = None  # type: str | None

    def __init__(self):

        self._cli = None
        super(CLIMgr, self).__init__()

    def is_available(self, handle_exceptions=True):
        found = False
        try:
            self._cli = get_bin_path(self.CLI)
            found = True
        except ValueError:
            if not handle_exceptions:
                raise
        return found
4,410
Python
.py
93
37.580645
134
0.650012
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
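A hedged sketch of the subclass contract above (FakeCLI and its package data are hypothetical; real implementations shell out via an AnsibleModule). CLIMgr supplies is_available(), so only the two listing hooks are needed:

from ansible.module_utils.facts.packages import CLIMgr

class FakeCLI(CLIMgr):
    CLI = 'true'  # probed with get_bin_path() by CLIMgr.is_available()

    def list_installed(self):
        return ['acl-2.3.1', 'zlib-1.2.13']  # fabricated for this sketch

    def get_package_details(self, package):
        name, _, version = package.rpartition('-')
        return {'name': name, 'version': version}

mgr = FakeCLI()
if mgr.is_available():
    # get_packages() adds source='fakecli' (the lowercased class name)
    print(mgr.get_packages())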
14,248
compat.py
ansible_ansible/lib/ansible/module_utils/facts/compat.py
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations

from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts import default_collectors
from ansible.module_utils.facts import ansible_collector


def get_all_facts(module):
    """compat api for ansible 2.2/2.3 module_utils.facts.get_all_facts method

    Expects module to be an instance of AnsibleModule, with a 'gather_subset' param.

    returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
    the fact value."""

    gather_subset = module.params['gather_subset']
    return ansible_facts(module, gather_subset=gather_subset)


def ansible_facts(module, gather_subset=None):
    """Compat api for ansible 2.0/2.2/2.3 module_utils.facts.ansible_facts method

    2.2/2.3 expect a gather_subset arg.
    2.0/2.1 do not accept a gather_subset arg

    So make gather_subset an optional arg, defaulting to the module's
    'gather_subset' param (or ['all'] when that is unset).

    'module' should be an instance of an AnsibleModule.

    returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
    the fact value.
    """

    gather_subset = gather_subset or module.params.get('gather_subset', ['all'])
    gather_timeout = module.params.get('gather_timeout', 10)
    filter_spec = module.params.get('filter', '*')

    minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
                                       'distribution', 'dns', 'env', 'fips', 'local',
                                       'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
                                       'service_mgr', 'ssh_pub_keys', 'user'])

    all_collector_classes = default_collectors.collectors

    # don't add a prefix
    namespace = PrefixFactNamespace(namespace_name='ansible', prefix='')

    fact_collector = \
        ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
                                                namespace=namespace,
                                                filter_spec=filter_spec,
                                                gather_subset=gather_subset,
                                                gather_timeout=gather_timeout,
                                                minimal_gather_subset=minimal_gather_subset)

    facts_dict = fact_collector.collect(module=module)

    return facts_dict
4,062
Python
.py
66
53.257576
94
0.696429
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,249
sysctl.py
ansible_ansible/lib/ansible/module_utils/facts/sysctl.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import re

from ansible.module_utils.common.text.converters import to_text


def get_sysctl(module, prefixes):

    sysctl = dict()
    sysctl_cmd = module.get_bin_path('sysctl')
    if sysctl_cmd is not None:

        cmd = [sysctl_cmd]
        cmd.extend(prefixes)

        try:
            rc, out, err = module.run_command(cmd)
        except (IOError, OSError) as e:
            module.warn('Unable to read sysctl: %s' % to_text(e))
            rc = 1

        if rc == 0:
            key = ''
            value = ''
            for line in out.splitlines():
                if not line.strip():
                    continue

                if line.startswith(' '):
                    # handle multiline values, they will not have a starting key
                    # Add the newline back in so people can split on it to parse
                    # lines if they need to.
                    value += '\n' + line
                    continue

                if key:
                    sysctl[key] = value.strip()

                try:
                    (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
                except Exception as e:
                    module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))

            if key:
                sysctl[key] = value.strip()

    return sysctl
2,035
Python
.py
49
31.755102
101
0.588235
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
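The key/value split above accepts both the Linux "key = value" layout and the BSD "key: value" layout; a standalone check of the regex (editor-added, sample lines are illustrative):

import re

for line in ('kernel.hostname = myhost', 'hw.ncpu: 8'):
    key, value = re.split(r'\s?=\s?|: ', line, maxsplit=1)
    print(key, '->', value)
# kernel.hostname -> myhost
# hw.ncpu -> 8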
14,250
timeout.py
ansible_ansible/lib/ansible/module_utils/facts/timeout.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import multiprocessing
import multiprocessing.pool as mp

# timeout function to make sure some fact gathering
# steps do not exceed a time limit

GATHER_TIMEOUT = None
DEFAULT_GATHER_TIMEOUT = 10


class TimeoutError(Exception):
    pass


def timeout(seconds=None, error_message="Timer expired"):
    """
    Timeout decorator to expire after a set number of seconds.  This raises an
    ansible.module_utils.facts.TimeoutError if the timeout is hit before the
    function completes.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            timeout_value = seconds
            if timeout_value is None:
                timeout_value = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT

            pool = mp.ThreadPool(processes=1)
            res = pool.apply_async(func, args, kwargs)
            pool.close()
            try:
                return res.get(timeout_value)
            except multiprocessing.TimeoutError:
                # Re-raise as the ansible.module_utils.facts.timeout.TimeoutError defined above
                raise TimeoutError(f'{error_message} after {timeout_value} seconds')
            finally:
                pool.terminate()

        return wrapper

    # If we were called as @timeout, then the first parameter will be the
    # function we are to wrap instead of the number of seconds.  Detect this
    # and correct it by setting seconds to our default value and return the
    # inner decorator function manually wrapped around the function
    if callable(seconds):
        func = seconds
        seconds = None
        return decorator(func)

    # If we were called as @timeout(...) then python itself will take
    # care of wrapping the inner decorator around the function

    return decorator
2,453
Python
.py
56
37.642857
89
0.705956
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
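A minimal usage sketch for the timeout decorator above, in its parameterized form (editor-added; assumes the ansible tree is importable):

import time
from ansible.module_utils.facts.timeout import timeout, TimeoutError

@timeout(2)
def slow():
    time.sleep(5)

try:
    slow()
except TimeoutError as e:
    print(e)  # 'Timer expired after 2 seconds'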
14,251
fc_wwn.py
ansible_ansible/lib/ansible/module_utils/facts/network/fc_wwn.py
# Fibre Channel WWN initiator related facts collection for ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import sys
import glob

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector


class FcWwnInitiatorFactCollector(BaseFactCollector):
    name = 'fibre_channel_wwn'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        """
        Example contents /sys/class/fc_host/*/port_name:

        0x21000014ff52a9bb
        """

        fc_facts = {}
        fc_facts['fibre_channel_wwn'] = []
        if sys.platform.startswith('linux'):
            for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
                for line in get_file_lines(fcfile):
                    fc_facts['fibre_channel_wwn'].append(line.rstrip()[2:])
        elif sys.platform.startswith('sunos'):
            # on solaris 10 or solaris 11 should use `fcinfo hba-port`
            # TBD (not implemented): on solaris 9 use `prtconf -pv`
            cmd = module.get_bin_path('fcinfo')
            if cmd:
                cmd = cmd + " hba-port"
                rc, fcinfo_out, err = module.run_command(cmd)
                # fcinfo hba-port | grep "Port WWN"
                # HBA Port WWN: 10000090fa1658de
                if rc == 0 and fcinfo_out:
                    for line in fcinfo_out.splitlines():
                        if 'Port WWN' in line:
                            data = line.split(' ')
                            fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
        elif sys.platform.startswith('aix'):
            cmd = module.get_bin_path('lsdev')
            lscfg_cmd = module.get_bin_path('lscfg')
            if cmd and lscfg_cmd:
                # get list of available fibre-channel devices (fcs)
                cmd = cmd + " -Cc adapter -l fcs*"
                rc, lsdev_out, err = module.run_command(cmd)
                if rc == 0 and lsdev_out:
                    for line in lsdev_out.splitlines():
                        # if device is available (not in defined state), get its WWN
                        if 'Available' in line:
                            data = line.split(' ')
                            cmd = lscfg_cmd + " -vl %s" % data[0]
                            rc, lscfg_out, err = module.run_command(cmd)
                            # example output
                            # lscfg -vpl fcs3 | grep "Network Address"
                            # Network Address.............10000090FA551509
                            if rc == 0 and lscfg_out:
                                for line in lscfg_out.splitlines():
                                    if 'Network Address' in line:
                                        data = line.split('.')
                                        fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
        elif sys.platform.startswith('hp-ux'):
            cmd = module.get_bin_path('ioscan')
            fcmsu_cmd = module.get_bin_path(
                'fcmsutil',
                opt_dirs=['/opt/fcms/bin'],
            )
            # go ahead if we have both commands available
            if cmd and fcmsu_cmd:
                # ioscan / get list of available fibre-channel devices (fcd)
                cmd = cmd + " -fnC FC"
                rc, ioscan_out, err = module.run_command(cmd)
                if rc == 0 and ioscan_out:
                    for line in ioscan_out.splitlines():
                        line = line.strip()
                        if '/dev/fcd' in line:
                            dev = line.split(' ')
                            # get device information
                            cmd = fcmsu_cmd + " %s" % dev[0]
                            rc, fcmsutil_out, err = module.run_command(cmd)
                            # lookup the following line
                            # N_Port Port World Wide Name = 0x50060b00006975ec
                            if rc == 0 and fcmsutil_out:
                                for line in fcmsutil_out.splitlines():
                                    if 'N_Port Port World Wide Name' in line:
                                        data = line.split('=')
                                        fc_facts['fibre_channel_wwn'].append(data[-1].strip())
        return fc_facts
5,093
Python
.py
99
35.636364
95
0.521669
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
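The Linux branch above boils down to one slice per sysfs line: drop trailing whitespace, then strip the leading '0x' (editor-added; sample value taken from the docstring above):

line = '0x21000014ff52a9bb\n'
print(line.rstrip()[2:])  # '21000014ff52a9bb'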
14,252
linux.py
ansible_ansible/lib/ansible/module_utils/facts/network/linux.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import glob
import os
import re
import socket
import struct

from ansible.module_utils.facts.network.base import Network, NetworkCollector
from ansible.module_utils.facts.utils import get_file_content


class LinuxNetwork(Network):
    """
    This is a Linux-specific subclass of Network. It defines
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.
    - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
    - ipv4_address and ipv6_address: the first non-local address for each family.
    """
    platform = 'Linux'
    INTERFACE_TYPE = {
        '1': 'ether',
        '32': 'infiniband',
        '512': 'ppp',
        '772': 'loopback',
        '65534': 'tunnel',
    }

    def populate(self, collected_facts=None):
        network_facts = {}
        ip_path = self.module.get_bin_path('ip')

        if ip_path is None:
            return network_facts

        default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path,
                                                                 collected_facts=collected_facts)
        interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
        network_facts['interfaces'] = interfaces.keys()

        for iface in interfaces:
            network_facts[iface] = interfaces[iface]

        network_facts['default_ipv4'] = default_ipv4
        network_facts['default_ipv6'] = default_ipv6
        network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
        network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
        network_facts['locally_reachable_ips'] = self.get_locally_reachable_ips(ip_path)

        return network_facts

    # List all `scope host` routes/addresses.
    # They belong to routes, but it means the whole prefix is reachable
    # locally, regardless of specific IP addresses.
    # E.g.: 192.168.0.0/24, any IP address is reachable from this range
    # if assigned as scope host.
    def get_locally_reachable_ips(self, ip_path):
        locally_reachable_ips = dict(
            ipv4=[],
            ipv6=[],
        )

        def parse_locally_reachable_ips(output):
            for line in output.splitlines():
                if not line:
                    continue
                words = line.split()
                if words[0] != 'local':
                    continue
                address = words[1]
                if ":" in address:
                    if address not in locally_reachable_ips['ipv6']:
                        locally_reachable_ips['ipv6'].append(address)
                else:
                    if address not in locally_reachable_ips['ipv4']:
                        locally_reachable_ips['ipv4'].append(address)

        args = [ip_path, '-4', 'route', 'show', 'table', 'local']
        rc, routes, dummy = self.module.run_command(args)
        if rc == 0:
            parse_locally_reachable_ips(routes)
        args = [ip_path, '-6', 'route', 'show', 'table', 'local']
        rc, routes, dummy = self.module.run_command(args)
        if rc == 0:
            parse_locally_reachable_ips(routes)

        return locally_reachable_ips

    def get_default_interfaces(self, ip_path, collected_facts=None):
        collected_facts = collected_facts or {}
        # Use the commands:
        #     ip -4 route get 8.8.8.8                     -> Google public DNS
        #     ip -6 route get 2404:6800:400a:800::1012    -> ipv6.google.com
        # to find out the default outgoing interface, address, and gateway
        command = dict(
            v4=[ip_path, '-4', 'route', 'get', '8.8.8.8'],
            v6=[ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
        )

        interface = dict(v4={}, v6={})

        for v in 'v4', 'v6':

            if (v == 'v6' and collected_facts.get('ansible_os_family') == 'RedHat' and
                    collected_facts.get('ansible_distribution_version', '').startswith('4.')):
                continue
            if v == 'v6' and not socket.has_ipv6:
                continue
            rc, out, err = self.module.run_command(command[v], errors='surrogate_then_replace')
            if not out:
                # v6 routing may result in
                #   RTNETLINK answers: Invalid argument
                continue
            words = out.splitlines()[0].split()
            # A valid output starts with the queried address on the first line
            if len(words) > 0 and words[0] == command[v][-1]:
                for i in range(len(words) - 1):
                    if words[i] == 'dev':
                        interface[v]['interface'] = words[i + 1]
                    elif words[i] == 'src':
                        interface[v]['address'] = words[i + 1]
                    elif words[i] == 'via' and words[i + 1] != command[v][-1]:
                        interface[v]['gateway'] = words[i + 1]
        return interface['v4'], interface['v6']

    def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
        interfaces = {}
        ips = dict(
            all_ipv4_addresses=[],
            all_ipv6_addresses=[],
        )

        # FIXME: maybe split into smaller methods?
        # FIXME: this is pretty much a constructor

        for path in glob.glob('/sys/class/net/*'):
            if not os.path.isdir(path):
                continue
            device = os.path.basename(path)
            interfaces[device] = {'device': device}
            if os.path.exists(os.path.join(path, 'address')):
                macaddress = get_file_content(os.path.join(path, 'address'), default='')
                if macaddress and macaddress != '00:00:00:00:00:00':
                    interfaces[device]['macaddress'] = macaddress
            if os.path.exists(os.path.join(path, 'mtu')):
                interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
            if os.path.exists(os.path.join(path, 'operstate')):
                interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
            if os.path.exists(os.path.join(path, 'device', 'driver', 'module')):
                interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
            if os.path.exists(os.path.join(path, 'type')):
                _type = get_file_content(os.path.join(path, 'type'))
                interfaces[device]['type'] = self.INTERFACE_TYPE.get(_type, 'unknown')
            if os.path.exists(os.path.join(path, 'bridge')):
                interfaces[device]['type'] = 'bridge'
                interfaces[device]['interfaces'] = [os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*'))]
                if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
                    interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
                if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
                    interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
            if os.path.exists(os.path.join(path, 'bonding')):
                interfaces[device]['type'] = 'bonding'
                interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
                interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
                interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
                interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
                primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
                if primary:
                    interfaces[device]['primary'] = primary
                    path = os.path.join(path, 'bonding', 'all_slaves_active')
                    if os.path.exists(path):
                        interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
            if os.path.exists(os.path.join(path, 'bonding_slave')):
                interfaces[device]['perm_macaddress'] = get_file_content(os.path.join(path, 'bonding_slave', 'perm_hwaddr'), default='')
            if os.path.exists(os.path.join(path, 'device')):
                interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path, 'device')))
            if os.path.exists(os.path.join(path, 'speed')):
                speed = get_file_content(os.path.join(path, 'speed'))
                if speed is not None:
                    interfaces[device]['speed'] = int(speed)

            # Check whether an interface is in promiscuous mode
            if os.path.exists(os.path.join(path, 'flags')):
                promisc_mode = False
                # The second byte indicates whether the interface is in promiscuous mode.
                # 1 = promisc
                # 0 = no promisc
                data = int(get_file_content(os.path.join(path, 'flags')), 16)
                promisc_mode = (data & 0x0100 > 0)
                interfaces[device]['promisc'] = promisc_mode

            # TODO: determine if this needs to be in a nested scope/closure
            def parse_ip_output(output, secondary=False):
                for line in output.splitlines():
                    if not line:
                        continue
                    words = line.split()
                    broadcast = ''
                    if words[0] == 'inet':
                        if '/' in words[1]:
                            address, netmask_length = words[1].split('/')
                            if len(words) > 3:
                                if words[2] == 'brd':
                                    broadcast = words[3]
                        else:
                            # pointopoint interfaces do not have a prefix
                            address = words[1]
                            netmask_length = "32"
                        address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
                        netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
                        netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
                        network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
                        iface = words[-1]
                        # NOTE: device is ref to outside scope
                        # NOTE: interfaces is also ref to outside scope
                        if iface != device:
                            interfaces[iface] = {}
                        if not secondary and "ipv4" not in interfaces[iface]:
                            interfaces[iface]['ipv4'] = {'address': address,
                                                         'broadcast': broadcast,
                                                         'netmask': netmask,
                                                         'network': network,
                                                         'prefix': netmask_length,
                                                         }
                        else:
                            if "ipv4_secondaries" not in interfaces[iface]:
                                interfaces[iface]["ipv4_secondaries"] = []
                            interfaces[iface]["ipv4_secondaries"].append({
                                'address': address,
                                'broadcast': broadcast,
                                'netmask': netmask,
                                'network': network,
                                'prefix': netmask_length,
                            })

                        # add this secondary IP to the main device
                        if secondary:
                            if "ipv4_secondaries" not in interfaces[device]:
                                interfaces[device]["ipv4_secondaries"] = []
                            if device != iface:
                                interfaces[device]["ipv4_secondaries"].append({
                                    'address': address,
                                    'broadcast': broadcast,
                                    'netmask': netmask,
                                    'network': network,
                                    'prefix': netmask_length,
                                })

                        # NOTE: default_ipv4 is ref to outside scope
                        # If this is the default address, update default_ipv4
                        if 'address' in default_ipv4 and default_ipv4['address'] == address:
                            default_ipv4['broadcast'] = broadcast
                            default_ipv4['netmask'] = netmask
                            default_ipv4['network'] = network
                            default_ipv4['prefix'] = netmask_length
                            # NOTE: macaddress is ref from outside scope
                            default_ipv4['macaddress'] = macaddress
                            default_ipv4['mtu'] = interfaces[device]['mtu']
                            default_ipv4['type'] = interfaces[device].get("type", "unknown")
                            default_ipv4['alias'] = words[-1]
                        if not address.startswith('127.'):
                            ips['all_ipv4_addresses'].append(address)
                    elif words[0] == 'inet6':
                        if 'peer' == words[2]:
                            address = words[1]
                            dummy, prefix = words[3].split('/')
                            scope = words[5]
                        else:
                            address, prefix = words[1].split('/')
                            scope = words[3]
                        if 'ipv6' not in interfaces[device]:
                            interfaces[device]['ipv6'] = []
                        interfaces[device]['ipv6'].append({
                            'address': address,
                            'prefix': prefix,
                            'scope': scope
                        })
                        # If this is the default address, update default_ipv6
                        if 'address' in default_ipv6 and default_ipv6['address'] == address:
                            default_ipv6['prefix'] = prefix
                            default_ipv6['scope'] = scope
                            default_ipv6['macaddress'] = macaddress
                            default_ipv6['mtu'] = interfaces[device]['mtu']
                            default_ipv6['type'] = interfaces[device].get("type", "unknown")
                        if not address == '::1':
                            ips['all_ipv6_addresses'].append(address)

            args = [ip_path, 'addr', 'show', 'primary', 'dev', device]
            rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
            if rc == 0:
                parse_ip_output(primary_data)
            else:
                # possibly busybox, fallback to running without the "primary" arg
                # https://github.com/ansible/ansible/issues/50871
                args = [ip_path, 'addr', 'show', 'dev', device]
                rc, data, stderr =
self.module.run_command(args, errors='surrogate_then_replace') if rc == 0: parse_ip_output(data) args = [ip_path, 'addr', 'show', 'secondary', 'dev', device] rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace') if rc == 0: parse_ip_output(secondary_data, secondary=True) interfaces[device].update(self.get_ethtool_data(device)) # replace : by _ in interface name since they are hard to use in template new_interfaces = {} # i is a dict key (string) not an index int for i in interfaces: if ':' in i: new_interfaces[i.replace(':', '_')] = interfaces[i] else: new_interfaces[i] = interfaces[i] return new_interfaces, ips def get_ethtool_data(self, device): data = {} ethtool_path = self.module.get_bin_path("ethtool") # FIXME: exit early on falsey ethtool_path and un-indent if ethtool_path: args = [ethtool_path, '-k', device] rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace') # FIXME: exit early on falsey if we can if rc == 0: features = {} for line in stdout.strip().splitlines(): if not line or line.endswith(":"): continue key, value = line.split(": ") if not value: continue features[key.strip().replace('-', '_')] = value.strip() data['features'] = features args = [ethtool_path, '-T', device] rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace') if rc == 0: data['timestamping'] = [m.lower() for m in re.findall(r'SOF_TIMESTAMPING_(\w+)', stdout)] data['hw_timestamp_filters'] = [m.lower() for m in re.findall(r'HWTSTAMP_FILTER_(\w+)', stdout)] m = re.search(r'PTP Hardware Clock: (\d+)', stdout) if m: data['phc_index'] = int(m.groups()[0]) return data class LinuxNetworkCollector(NetworkCollector): _platform = 'Linux' _fact_class = LinuxNetwork required_facts = set(['distribution', 'platform'])
18,549
Python
.py
332
38.593373
136
0.511794
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
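The IPv4 math in LinuxNetwork.parse_ip_output and the promiscuous-mode flag test above are compact; here is a minimal standalone sketch of the same arithmetic, stdlib only, with sample values invented for illustration.

# Sketch of the address math used in LinuxNetwork.parse_ip_output and of the
# promiscuous-mode bit test against /sys/class/net/<dev>/flags. Sample values
# are illustrative, not taken from a real host.
import socket
import struct

def ipv4_info(address, prefix_len):
    # Same trick as above: a 32-bit mask with prefix_len leading one-bits.
    netmask_bin = (1 << 32) - (1 << 32 >> prefix_len)
    address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
    return {
        'netmask': socket.inet_ntoa(struct.pack('!L', netmask_bin)),
        'network': socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)),
    }

IFF_PROMISC = 0x100  # the kernel flag bit checked by the code above

print(ipv4_info('192.168.1.23', 24))
# {'netmask': '255.255.255.0', 'network': '192.168.1.0'}
print(bool(int('0x1303', 16) & IFF_PROMISC))  # True: promisc bit is set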
14,253
iscsi.py
ansible_ansible/lib/ansible/module_utils/facts/network/iscsi.py
# iSCSI initiator related facts collection for Ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import sys

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector


class IscsiInitiatorNetworkCollector(NetworkCollector):
    name = 'iscsi'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        """
        Example of contents of /etc/iscsi/initiatorname.iscsi:

        ## DO NOT EDIT OR REMOVE THIS FILE!
        ## If you remove this file, the iSCSI daemon will not start.
        ## If you change the InitiatorName, existing access control lists
        ## may reject this initiator. The InitiatorName must be unique
        ## for each iSCSI initiator. Do NOT duplicate iSCSI InitiatorNames.
        InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b

        Example of output from the AIX lsattr command:

        # lsattr -E -l iscsi0
        disc_filename  /etc/iscsi/targets            Configuration file                            False
        disc_policy    file                          Discovery Policy                              True
        initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name                          True
        isns_srvnames  auto                          iSNS Servers IP Addresses                     True
        isns_srvports                                iSNS Servers Port Numbers                     True
        max_targets    16                            Maximum Targets Allowed                       True
        num_cmd_elems  200                           Maximum number of commands to queue to driver True

        Example of output from the HP-UX iscsiutil command:

        #iscsiutil -l
        Initiator Name             : iqn.1986-03.com.hp:mcel_VMhost3.1f355cf6-e2db-11e0-a999-b44c0aef5537
        Initiator Alias            :

        Authentication Method      : None
        CHAP Method                : CHAP_UNI
        Initiator CHAP Name        :
        CHAP Secret                :
        NAS Hostname               :
        NAS Secret                 :
        Radius Server Hostname     :
        Header Digest              : None, CRC32C (default)
        Data Digest                : None, CRC32C (default)
        SLP Scope list for iSLPD   :
        """

        iscsi_facts = {}
        iscsi_facts['iscsi_iqn'] = ""
        if sys.platform.startswith('linux') or sys.platform.startswith('sunos'):
            for line in get_file_content('/etc/iscsi/initiatorname.iscsi', '').splitlines():
                if line.startswith('#') or line.startswith(';') or line.strip() == '':
                    continue
                if line.startswith('InitiatorName='):
                    iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
                    break
        elif sys.platform.startswith('aix'):
            cmd = module.get_bin_path('lsattr')
            if cmd is None:
                return iscsi_facts

            cmd += " -E -l iscsi0"
            rc, out, err = module.run_command(cmd)
            if rc == 0 and out:
                line = self.findstr(out, 'initiator_name')
                if line:
                    iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
        elif sys.platform.startswith('hp-ux'):
            cmd = module.get_bin_path(
                'iscsiutil',
                opt_dirs=['/opt/iscsi/bin']
            )
            if cmd is None:
                return iscsi_facts

            cmd += " -l"
            rc, out, err = module.run_command(cmd)
            if out:
                line = self.findstr(out, 'Initiator Name')
                if line:
                    iscsi_facts['iscsi_iqn'] = line.split(":", 1)[1].rstrip()
        return iscsi_facts

    def findstr(self, text, match):
        # Return the first line containing the match. The previous version
        # left 'found' unbound (raising UnboundLocalError) when no line
        # matched; returning '' lets callers guard with a simple truth test.
        for line in text.splitlines():
            if match in line:
                return line
        return ''
4,631
Python
.py
93
40.763441
105
0.577119
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
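The Linux branch of collect() above is a small line scan; a minimal sketch of the same parse, using the sample file contents quoted in the docstring:

# Sketch of the /etc/iscsi/initiatorname.iscsi parsing done on Linux by
# IscsiInitiatorNetworkCollector.collect(); the sample text is the example
# from the docstring above.
sample = """## DO NOT EDIT OR REMOVE THIS FILE!
InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b
"""

iqn = ""
for line in sample.splitlines():
    if line.startswith('#') or line.startswith(';') or line.strip() == '':
        continue  # skip comments and blank lines
    if line.startswith('InitiatorName='):
        iqn = line.split('=', 1)[1]
        break

print(iqn)  # iqn.1993-08.org.debian:01:44a42c8ddb8b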
14,254
hurd.py
ansible_ansible/lib/ansible/module_utils/facts/network/hurd.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os

from ansible.module_utils.facts.network.base import Network, NetworkCollector


class HurdPfinetNetwork(Network):
    """
    This is a GNU Hurd specific subclass of Network. It uses fsysopts to
    get the IP addresses and supports only pfinet.
    """
    platform = 'GNU'
    _socket_dir = '/servers/socket/'

    def assign_network_facts(self, network_facts, fsysopts_path, socket_path):
        rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
        # FIXME: build up a interfaces datastructure, then assign into network_facts
        network_facts['interfaces'] = []
        for i in out.split():
            if '=' in i and i.startswith('--'):
                k, v = i.split('=', 1)
                # remove '--'
                k = k[2:]
                if k == 'interface':
                    # remove /dev/ from /dev/eth0
                    v = v[5:]
                    network_facts['interfaces'].append(v)
                    network_facts[v] = {
                        'active': True,
                        'device': v,
                        'ipv4': {},
                        'ipv6': [],
                    }
                    current_if = v
                elif k == 'address':
                    network_facts[current_if]['ipv4']['address'] = v
                elif k == 'netmask':
                    network_facts[current_if]['ipv4']['netmask'] = v
                elif k == 'address6':
                    address, prefix = v.split('/')
                    network_facts[current_if]['ipv6'].append({
                        'address': address,
                        'prefix': prefix,
                    })
        return network_facts

    def populate(self, collected_facts=None):
        network_facts = {}

        fsysopts_path = self.module.get_bin_path('fsysopts')
        if fsysopts_path is None:
            return network_facts

        socket_path = None
        for l in ('inet', 'inet6'):
            link = os.path.join(self._socket_dir, l)
            if os.path.exists(link):
                socket_path = link
                break

        if socket_path is None:
            return network_facts

        return self.assign_network_facts(network_facts, fsysopts_path, socket_path)


class HurdNetworkCollector(NetworkCollector):
    _platform = 'GNU'
    _fact_class = HurdPfinetNetwork
3,066
Python
.py
72
31.611111
84
0.569463
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
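assign_network_facts() above walks the '--key=value' tokens that fsysopts prints; a standalone sketch of the same walk, where the sample line is a plausible pfinet translator setting assumed for illustration (not captured from a real Hurd host):

# Sketch of the fsysopts token parsing in HurdPfinetNetwork.assign_network_facts().
sample = '/hurd/pfinet --interface=/dev/eth0 --address=192.168.0.2 --netmask=255.255.255.0'

facts = {'interfaces': []}
current_if = None
for token in sample.split():
    if '=' in token and token.startswith('--'):
        k, v = token[2:].split('=', 1)
        if k == 'interface':
            v = v[5:]  # strip the /dev/ prefix, as the code above does
            facts['interfaces'].append(v)
            facts[v] = {'ipv4': {}, 'ipv6': []}
            current_if = v
        elif k == 'address':
            facts[current_if]['ipv4']['address'] = v
        elif k == 'netmask':
            facts[current_if]['ipv4']['netmask'] = v

print(facts['eth0']['ipv4'])
# {'address': '192.168.0.2', 'netmask': '255.255.255.0'}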
14,255
nvme.py
ansible_ansible/lib/ansible/module_utils/facts/network/nvme.py
# NVMe initiator related facts collection for Ansible. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import sys import ansible.module_utils.compat.typing as t from ansible.module_utils.facts.utils import get_file_content from ansible.module_utils.facts.network.base import NetworkCollector class NvmeInitiatorNetworkCollector(NetworkCollector): name = 'nvme' _fact_ids = set() # type: t.Set[str] def collect(self, module=None, collected_facts=None): """ Currently NVMe is only supported in some Linux distributions. If NVMe is configured on the host then a file will have been created during the NVMe driver installation. This file holds the unique NQN of the host. Example of contents of /etc/nvme/hostnqn: # cat /etc/nvme/hostnqn nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d """ nvme_facts = {} nvme_facts['hostnqn'] = "" if sys.platform.startswith('linux'): for line in get_file_content('/etc/nvme/hostnqn', '').splitlines(): if line.startswith('#') or line.startswith(';') or line.strip() == '': continue if line.startswith('nqn.'): nvme_facts['hostnqn'] = line break return nvme_facts
1,996
Python
.py
44
39.113636
86
0.692942
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
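The hostnqn scan follows the same comment-skipping pattern as the iSCSI collector; a tiny sketch, with the NQN value taken from the docstring example above:

# Sketch of the /etc/nvme/hostnqn parsing in NvmeInitiatorNetworkCollector.
sample = "nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d\n"

hostnqn = ""
for line in sample.splitlines():
    if line.startswith('#') or line.startswith(';') or line.strip() == '':
        continue
    if line.startswith('nqn.'):
        hostnqn = line
        break

print(hostnqn)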
14,256
sunos.py
ansible_ansible/lib/ansible/module_utils/facts/network/sunos.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import re from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork class SunOSNetwork(GenericBsdIfconfigNetwork): """ This is the SunOS Network Class. It uses the GenericBsdIfconfigNetwork. Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface so these facts have been moved inside the 'ipv4' and 'ipv6' lists. """ platform = 'SunOS' # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6. # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface. # 'parse_interface_line()' checks for previously seen interfaces before defining # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa). def get_interfaces_info(self, ifconfig_path): interfaces = {} current_if = {} ips = dict( all_ipv4_addresses=[], all_ipv6_addresses=[], ) rc, out, err = self.module.run_command([ifconfig_path, '-a']) for line in out.splitlines(): if line: words = line.split() if re.match(r'^\S', line) and len(words) > 3: current_if = self.parse_interface_line(words, current_if, interfaces) interfaces[current_if['device']] = current_if elif words[0].startswith('options='): self.parse_options_line(words, current_if, ips) elif words[0] == 'nd6': self.parse_nd6_line(words, current_if, ips) elif words[0] == 'ether': self.parse_ether_line(words, current_if, ips) elif words[0] == 'media:': self.parse_media_line(words, current_if, ips) elif words[0] == 'status:': self.parse_status_line(words, current_if, ips) elif words[0] == 'lladdr': self.parse_lladdr_line(words, current_if, ips) elif words[0] == 'inet': self.parse_inet_line(words, current_if, ips) elif words[0] == 'inet6': self.parse_inet6_line(words, current_if, ips) else: self.parse_unknown_line(words, current_if, ips) # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the # ipv4/ipv6 lists which is ugly and hard to read. # This quick hack merges the dictionaries. Purely cosmetic. for iface in interfaces: for v in 'ipv4', 'ipv6': combined_facts = {} for facts in interfaces[iface][v]: combined_facts.update(facts) if len(combined_facts.keys()) > 0: interfaces[iface][v] = [combined_facts] return interfaces, ips def parse_interface_line(self, words, current_if, interfaces): device = words[0][0:-1] if device not in interfaces: current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} else: current_if = interfaces[device] flags = self.get_options(words[1]) v = 'ipv4' if 'IPv6' in flags: v = 'ipv6' if 'LOOPBACK' in flags: current_if['type'] = 'loopback' current_if[v].append({'flags': flags, 'mtu': words[3]}) current_if['macaddress'] = 'unknown' # will be overwritten later return current_if # Solaris displays single digit octets in MAC addresses e.g. 
0:1:2:d:e:f # Add leading zero to each octet where needed. def parse_ether_line(self, words, current_if, ips): macaddress = '' for octet in words[1].split(':'): octet = ('0' + octet)[-2:None] macaddress += (octet + ':') current_if['macaddress'] = macaddress[0:-1] class SunOSNetworkCollector(NetworkCollector): _fact_class = SunOSNetwork _platform = 'SunOS'
4,752
Python
.py
99
38.070707
90
0.610524
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
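parse_ether_line() above normalizes Solaris' single-digit MAC octets; the padding idiom is worth seeing on its own:

# Sketch of the octet padding in SunOSNetwork.parse_ether_line(): Solaris
# ifconfig prints single-digit octets (e.g. 0:1:2:d:e:f), which are padded
# to two digits each.
def pad_mac(raw):
    # ('0' + octet)[-2:] keeps two-digit octets intact and pads one-digit ones
    return ':'.join(('0' + octet)[-2:] for octet in raw.split(':'))

print(pad_mac('0:3:ba:2c:d:e'))  # 00:03:ba:2c:0d:0e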
14,257
generic_bsd.py
ansible_ansible/lib/ansible/module_utils/facts/network/generic_bsd.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import re import socket import struct from ansible.module_utils.facts.network.base import Network class GenericBsdIfconfigNetwork(Network): """ This is a generic BSD subclass of Network using the ifconfig command. It defines - interfaces (a list of interface names) - interface_<name> dictionary of ipv4, ipv6, and mac address information. - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses. """ platform = 'Generic_BSD_Ifconfig' def populate(self, collected_facts=None): network_facts = {} ifconfig_path = self.module.get_bin_path('ifconfig') if ifconfig_path is None: return network_facts route_path = self.module.get_bin_path('route') if route_path is None: return network_facts default_ipv4, default_ipv6 = self.get_default_interfaces(route_path) interfaces, ips = self.get_interfaces_info(ifconfig_path) interfaces = self.detect_type_media(interfaces) self.merge_default_interface(default_ipv4, interfaces, 'ipv4') self.merge_default_interface(default_ipv6, interfaces, 'ipv6') network_facts['interfaces'] = sorted(list(interfaces.keys())) for iface in interfaces: network_facts[iface] = interfaces[iface] network_facts['default_ipv4'] = default_ipv4 network_facts['default_ipv6'] = default_ipv6 network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses'] network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses'] return network_facts def detect_type_media(self, interfaces): for iface in interfaces: if 'media' in interfaces[iface]: if 'ether' in interfaces[iface]['media'].lower(): interfaces[iface]['type'] = 'ether' return interfaces def get_default_interfaces(self, route_path): # Use the commands: # route -n get default # route -n get -inet6 default # to find out the default outgoing interface, address, and gateway command = dict(v4=[route_path, '-n', 'get', 'default'], v6=[route_path, '-n', 'get', '-inet6', 'default']) interface = dict(v4={}, v6={}) for v in 'v4', 'v6': if v == 'v6' and not socket.has_ipv6: continue rc, out, err = self.module.run_command(command[v]) if not out: # v6 routing may result in # RTNETLINK answers: Invalid argument continue for line in out.splitlines(): words = line.strip().split(': ') # Collect output from route command if len(words) > 1: if words[0] == 'interface': interface[v]['interface'] = words[1] if words[0] == 'gateway': interface[v]['gateway'] = words[1] # help pick the right interface address on OpenBSD if words[0] == 'if address': interface[v]['address'] = words[1] # help pick the right interface address on NetBSD if words[0] == 'local addr': interface[v]['address'] = words[1] return interface['v4'], interface['v6'] def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'): interfaces = {} current_if = {} ips = dict( all_ipv4_addresses=[], all_ipv6_addresses=[], ) # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and macOS all implicitly add '-a' # when 
running the command 'ifconfig'. # Solaris must explicitly run the command 'ifconfig -a'. rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options]) for line in out.splitlines(): if line: words = line.split() if words[0] == 'pass': continue elif re.match(r'^\S', line) and len(words) > 3: current_if = self.parse_interface_line(words) interfaces[current_if['device']] = current_if elif words[0].startswith('options='): self.parse_options_line(words, current_if, ips) elif words[0] == 'nd6': self.parse_nd6_line(words, current_if, ips) elif words[0] == 'ether': self.parse_ether_line(words, current_if, ips) elif words[0] == 'media:': self.parse_media_line(words, current_if, ips) elif words[0] == 'status:': self.parse_status_line(words, current_if, ips) elif words[0] == 'lladdr': self.parse_lladdr_line(words, current_if, ips) elif words[0] == 'inet': self.parse_inet_line(words, current_if, ips) elif words[0] == 'inet6': self.parse_inet6_line(words, current_if, ips) elif words[0] == 'tunnel': self.parse_tunnel_line(words, current_if, ips) else: self.parse_unknown_line(words, current_if, ips) return interfaces, ips def parse_interface_line(self, words): device = words[0][0:-1] current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} current_if['flags'] = self.get_options(words[1]) if 'LOOPBACK' in current_if['flags']: current_if['type'] = 'loopback' current_if['macaddress'] = 'unknown' # will be overwritten later if len(words) >= 5: # Newer FreeBSD versions current_if['metric'] = words[3] current_if['mtu'] = words[5] else: current_if['mtu'] = words[3] return current_if def parse_options_line(self, words, current_if, ips): # Mac has options like this... current_if['options'] = self.get_options(words[0]) def parse_nd6_line(self, words, current_if, ips): # FreeBSD has options like this... 
current_if['options'] = self.get_options(words[1]) def parse_ether_line(self, words, current_if, ips): current_if['macaddress'] = words[1] current_if['type'] = 'ether' def parse_media_line(self, words, current_if, ips): # not sure if this is useful - we also drop information current_if['media'] = words[1] if len(words) > 2: current_if['media_select'] = words[2] if len(words) > 3: current_if['media_type'] = words[3][1:] if len(words) > 4: current_if['media_options'] = self.get_options(words[4]) def parse_status_line(self, words, current_if, ips): current_if['status'] = words[1] def parse_lladdr_line(self, words, current_if, ips): current_if['lladdr'] = words[1] def parse_inet_line(self, words, current_if, ips): # netbsd show aliases like this # lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184 # inet 127.0.0.1 netmask 0xff000000 # inet alias 127.1.1.1 netmask 0xff000000 if words[1] == 'alias': del words[1] address = {'address': words[1]} # cidr style ip address (eg, 127.0.0.1/24) in inet line # used in netbsd ifconfig -e output after 7.1 if '/' in address['address']: ip_address, cidr_mask = address['address'].split('/') address['address'] = ip_address netmask_length = int(cidr_mask) netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length)) address['netmask'] = socket.inet_ntoa(struct.pack('!L', netmask_bin)) if len(words) > 5: address['broadcast'] = words[3] else: # Don't just assume columns, use "netmask" as the index for the prior column try: netmask_idx = words.index('netmask') + 1 except ValueError: netmask_idx = 3 # deal with hex netmask if re.match('([0-9a-f]){8}$', words[netmask_idx]): netmask = '0x' + words[netmask_idx] else: netmask = words[netmask_idx] if netmask.startswith('0x'): address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(netmask, base=16))) else: # otherwise assume this is a dotted quad address['netmask'] = netmask # calculate the network address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0] netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0] address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)) if 'broadcast' not in address: # broadcast may be given or we need to calculate try: broadcast_idx = words.index('broadcast') + 1 except ValueError: address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff))) else: address['broadcast'] = words[broadcast_idx] # add to our list of addresses if not words[1].startswith('127.'): ips['all_ipv4_addresses'].append(address['address']) current_if['ipv4'].append(address) def parse_inet6_line(self, words, current_if, ips): address = {'address': words[1]} # using cidr style addresses, ala NetBSD ifconfig post 7.1 if '/' in address['address']: ip_address, cidr_mask = address['address'].split('/') address['address'] = ip_address address['prefix'] = cidr_mask if len(words) > 5: address['scope'] = words[5] else: if (len(words) >= 4) and (words[2] == 'prefixlen'): address['prefix'] = words[3] if (len(words) >= 6) and (words[4] == 'scopeid'): address['scope'] = words[5] localhost6 = ['::1', '::1/128', 'fe80::1%lo0'] if address['address'] not in localhost6: ips['all_ipv6_addresses'].append(address['address']) current_if['ipv6'].append(address) def parse_tunnel_line(self, words, current_if, ips): current_if['type'] = 'tunnel' def parse_unknown_line(self, words, current_if, ips): # we are going to ignore unknown lines here - this may be # a bad idea - but you can override it in your subclass pass # TODO: these are module 
scope static function candidates # (most of the class is really...) def get_options(self, option_string): start = option_string.find('<') + 1 end = option_string.rfind('>') if (start > 0) and (end > 0) and (end > start + 1): option_csv = option_string[start:end] return option_csv.split(',') else: return [] def merge_default_interface(self, defaults, interfaces, ip_type): if 'interface' not in defaults: return if not defaults['interface'] in interfaces: return ifinfo = interfaces[defaults['interface']] # copy all the interface values across except addresses for item in ifinfo: if item != 'ipv4' and item != 'ipv6': defaults[item] = ifinfo[item] ipinfo = [] if 'address' in defaults: ipinfo = [x for x in ifinfo[ip_type] if x['address'] == defaults['address']] if len(ipinfo) == 0: ipinfo = ifinfo[ip_type] if len(ipinfo) > 0: for item in ipinfo[0]: defaults[item] = ipinfo[0][item]
12,595
Python
.py
263
36.532319
117
0.571324
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
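get_options() above extracts the comma-separated flag names from an ifconfig token such as flags=8863<UP,BROADCAST,...>; the same logic as a standalone function, with an illustrative input:

# Sketch of GenericBsdIfconfigNetwork.get_options().
def get_options(option_string):
    start = option_string.find('<') + 1
    end = option_string.rfind('>')
    if (start > 0) and (end > 0) and (end > start + 1):
        return option_string[start:end].split(',')
    return []

print(get_options('flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST>'))
# ['UP', 'BROADCAST', 'SMART', 'RUNNING', 'SIMPLEX', 'MULTICAST']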
14,258
base.py
ansible_ansible/lib/ansible/module_utils/facts/network/base.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import ansible.module_utils.compat.typing as t from ansible.module_utils.facts.collector import BaseFactCollector class Network: """ This is a generic Network subclass of Facts. This should be further subclassed to implement per platform. If you subclass this, you must define: - interfaces (a list of interface names) - interface_<name> dictionary of ipv4, ipv6, and mac address information. All subclasses MUST define platform. """ platform = 'Generic' # FIXME: remove load_on_init when we can def __init__(self, module, load_on_init=False): self.module = module # TODO: more or less abstract/NotImplemented def populate(self, collected_facts=None): return {} class NetworkCollector(BaseFactCollector): # MAYBE: we could try to build this based on the arch specific implementation of Network() or its kin name = 'network' _fact_class = Network _fact_ids = set(['interfaces', 'default_ipv4', 'default_ipv6', 'all_ipv4_addresses', 'all_ipv6_addresses']) # type: t.Set[str] IPV6_SCOPE = {'0': 'global', '10': 'host', '20': 'link', '40': 'admin', '50': 'site', '80': 'organization'} def collect(self, module=None, collected_facts=None): collected_facts = collected_facts or {} if not module: return {} # Network munges cached_facts by side effect, so give it a copy facts_obj = self._fact_class(module) facts_dict = facts_obj.populate(collected_facts=collected_facts) return facts_dict
2,415
Python
.py
56
36.035714
105
0.663396
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
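The base classes above define the subclassing contract the platform files in this set follow: a Network subclass fills in populate(), and a NetworkCollector subclass pairs it with a platform string. A minimal sketch of that contract, assuming the ansible package is importable; the Example* names and the returned facts are hypothetical:

# Sketch of the subclassing pattern described in base.py.
from ansible.module_utils.facts.network.base import Network, NetworkCollector

class ExampleNetwork(Network):
    platform = 'Example'  # hypothetical platform name

    def populate(self, collected_facts=None):
        # A real implementation shells out via self.module.run_command()
        # and parses the output; this stub returns canned facts.
        return {'interfaces': ['eth0'], 'eth0': {'device': 'eth0'}}

class ExampleNetworkCollector(NetworkCollector):
    _platform = 'Example'
    _fact_class = ExampleNetwork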
14,259
freebsd.py
ansible_ansible/lib/ansible/module_utils/facts/network/freebsd.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork class FreeBSDNetwork(GenericBsdIfconfigNetwork): """ This is the FreeBSD Network Class. It uses the GenericBsdIfconfigNetwork unchanged. """ platform = 'FreeBSD' class FreeBSDNetworkCollector(NetworkCollector): _fact_class = FreeBSDNetwork _platform = 'FreeBSD'
1,137
Python
.py
26
41.423077
84
0.788235
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,260
aix.py
ansible_ansible/lib/ansible/module_utils/facts/network/aix.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import re from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork class AIXNetwork(GenericBsdIfconfigNetwork): """ This is the AIX Network Class. It uses the GenericBsdIfconfigNetwork unchanged. """ platform = 'AIX' def get_default_interfaces(self, route_path): interface = dict(v4={}, v6={}) netstat_path = self.module.get_bin_path('netstat') if netstat_path is None: return interface['v4'], interface['v6'] rc, out, err = self.module.run_command([netstat_path, '-nr']) lines = out.splitlines() for line in lines: words = line.split() if len(words) > 1 and words[0] == 'default': if '.' in words[1]: interface['v4']['gateway'] = words[1] interface['v4']['interface'] = words[5] elif ':' in words[1]: interface['v6']['gateway'] = words[1] interface['v6']['interface'] = words[5] return interface['v4'], interface['v6'] # AIX 'ifconfig -a' does not have three words in the interface line def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'): interfaces = {} current_if = {} ips = dict( all_ipv4_addresses=[], all_ipv6_addresses=[], ) uname_rc = uname_out = uname_err = None uname_path = self.module.get_bin_path('uname') if uname_path: uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W']) rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options]) for line in out.splitlines(): if line: words = line.split() # only this condition differs from GenericBsdIfconfigNetwork if re.match(r'^\w*\d*:', line): current_if = self.parse_interface_line(words) interfaces[current_if['device']] = current_if elif words[0].startswith('options='): self.parse_options_line(words, current_if, ips) elif words[0] == 'nd6': self.parse_nd6_line(words, current_if, ips) elif words[0] == 'ether': self.parse_ether_line(words, current_if, ips) elif words[0] == 'media:': self.parse_media_line(words, current_if, ips) elif words[0] == 'status:': self.parse_status_line(words, current_if, ips) elif words[0] == 'lladdr': self.parse_lladdr_line(words, current_if, ips) elif words[0] == 'inet': self.parse_inet_line(words, current_if, ips) elif words[0] == 'inet6': self.parse_inet6_line(words, current_if, ips) else: self.parse_unknown_line(words, current_if, ips) # don't bother with wpars it does not work # zero means not in wpar if not uname_rc and uname_out.split()[0] == '0': if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']): entstat_path = self.module.get_bin_path('entstat') if entstat_path: rc, out, err = self.module.run_command([entstat_path, current_if['device']]) if rc != 0: break for line in out.splitlines(): if not line: pass buff = re.match('^Hardware Address: (.*)', line) if buff: current_if['macaddress'] = buff.group(1) buff = re.match('^Device Type:', line) if buff and 
re.match('.*Ethernet', line): current_if['type'] = 'ether' # device must have mtu attribute in ODM if 'mtu' not in current_if: lsattr_path = self.module.get_bin_path('lsattr') if lsattr_path: rc, out, err = self.module.run_command([lsattr_path, '-El', current_if['device']]) if rc != 0: break for line in out.splitlines(): if line: words = line.split() if words[0] == 'mtu': current_if['mtu'] = words[1] return interfaces, ips # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here def parse_interface_line(self, words): device = words[0][0:-1] current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} current_if['flags'] = self.get_options(words[1]) current_if['macaddress'] = 'unknown' # will be overwritten later return current_if class AIXNetworkCollector(NetworkCollector): _fact_class = AIXNetwork _platform = 'AIX'
5,988
Python
.py
120
35.708333
106
0.537896
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
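get_default_interfaces() above scans 'netstat -nr' for the 'default' row and splits v4/v6 on the gateway's format; a standalone sketch, where the sample routing table is assumed and abbreviated (not captured from a real AIX host):

# Sketch of the default-route parsing in AIXNetwork.get_default_interfaces().
sample = """Routing tables
Destination      Gateway          Flags   Refs  Use  If      Exp  Groups
default          192.168.0.1      UG       6   4233  en0      -      -
"""

v4, v6 = {}, {}
for line in sample.splitlines():
    words = line.split()
    if len(words) > 1 and words[0] == 'default':
        # a dotted gateway means IPv4, a colon-separated one means IPv6
        target = v4 if '.' in words[1] else v6
        target['gateway'] = words[1]
        target['interface'] = words[5]  # the 'If' column, as indexed above

print(v4)  # {'gateway': '192.168.0.1', 'interface': 'en0'}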
14,261
dragonfly.py
ansible_ansible/lib/ansible/module_utils/facts/network/dragonfly.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork class DragonFlyNetwork(GenericBsdIfconfigNetwork): """ This is the DragonFly Network Class. It uses the GenericBsdIfconfigNetwork unchanged. """ platform = 'DragonFly' class DragonFlyNetworkCollector(NetworkCollector): _fact_class = DragonFlyNetwork _platform = 'DragonFly'
1,149
Python
.py
26
41.884615
84
0.79051
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,262
netbsd.py
ansible_ansible/lib/ansible/module_utils/facts/network/netbsd.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork class NetBSDNetwork(GenericBsdIfconfigNetwork): """ This is the NetBSD Network Class. It uses the GenericBsdIfconfigNetwork """ platform = 'NetBSD' def parse_media_line(self, words, current_if, ips): # example of line: # $ ifconfig # ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500 # ec_capabilities=1<VLAN_MTU> # ec_enabled=0 # address: 00:20:91:45:00:78 # media: Ethernet 10baseT full-duplex # inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255 current_if['media'] = words[1] if len(words) > 2: current_if['media_type'] = words[2] if len(words) > 3: current_if['media_options'] = words[3].split(',') class NetBSDNetworkCollector(NetworkCollector): _fact_class = NetBSDNetwork _platform = 'NetBSD'
1,748
Python
.py
40
38.925
85
0.711346
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,263
hpux.py
ansible_ansible/lib/ansible/module_utils/facts/network/hpux.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations from ansible.module_utils.facts.network.base import Network, NetworkCollector class HPUXNetwork(Network): """ HP-UX-specific subclass of Network. Defines networking facts: - default_interface - interfaces (a list of interface names) - interface_<name> dictionary of ipv4 address information. """ platform = 'HP-UX' def populate(self, collected_facts=None): network_facts = {} netstat_path = self.module.get_bin_path( 'netstat', opt_dirs=['/usr/bin'] ) if netstat_path is None: return network_facts default_interfaces_facts = self.get_default_interfaces() network_facts.update(default_interfaces_facts) interfaces = self.get_interfaces_info() network_facts['interfaces'] = interfaces.keys() for iface in interfaces: network_facts[iface] = interfaces[iface] return network_facts def get_default_interfaces(self): default_interfaces = {} netstat_path = self.module.get_bin_path( 'netstat', opt_dirs=['/usr/bin'] ) if netstat_path is None: return default_interfaces rc, out, err = self.module.run_command("%s -nr" % netstat_path) lines = out.splitlines() for line in lines: words = line.split() if len(words) > 1: if words[0] == 'default': default_interfaces['default_interface'] = words[4] default_interfaces['default_gateway'] = words[1] return default_interfaces def get_interfaces_info(self): interfaces = {} netstat_path = self.module.get_bin_path( 'netstat', opt_dirs=['/usr/bin'] ) if netstat_path is None: return interfaces rc, out, err = self.module.run_command("%s -niw" % netstat_path) lines = out.splitlines() for line in lines: words = line.split() for i in range(len(words) - 1): if words[i][:3] == 'lan': device = words[i] interfaces[device] = {'device': device} address = words[i + 3] interfaces[device]['ipv4'] = {'address': address} network = words[i + 2] interfaces[device]['ipv4'] = {'network': network, 'interface': device, 'address': address} return interfaces class HPUXNetworkCollector(NetworkCollector): _fact_class = HPUXNetwork _platform = 'HP-UX'
3,390
Python
.py
82
31.414634
77
0.599028
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,264
darwin.py
ansible_ansible/lib/ansible/module_utils/facts/network/darwin.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork


class DarwinNetwork(GenericBsdIfconfigNetwork):
    """
    This is the macOS (Darwin) Network Class.
    It uses the GenericBsdIfconfigNetwork unchanged.
    """
    platform = 'Darwin'

    # media line is different to the default FreeBSD one
    def parse_media_line(self, words, current_if, ips):
        # not sure if this is useful - we also drop information
        current_if['media'] = 'Unknown'  # Mac does not give us this
        current_if['media_select'] = words[1]
        if len(words) > 2:
            # macOS sets the media to '<unknown type>' for bridge interfaces
            # and parsing splits this into two words; this if/else helps
            if words[1] == '<unknown' and words[2] == 'type>':
                current_if['media_select'] = 'Unknown'
                current_if['media_type'] = 'unknown type'
            else:
                current_if['media_type'] = words[2][1:-1]
        if len(words) > 3:
            current_if['media_options'] = self.get_options(words[3])


class DarwinNetworkCollector(NetworkCollector):
    _fact_class = DarwinNetwork
    _platform = 'Darwin'
1,958
Python
.py
41
42.097561
84
0.698953
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,265
openbsd.py
ansible_ansible/lib/ansible/module_utils/facts/network/openbsd.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork class OpenBSDNetwork(GenericBsdIfconfigNetwork): """ This is the OpenBSD Network Class. It uses the GenericBsdIfconfigNetwork. """ platform = 'OpenBSD' # OpenBSD 'ifconfig -a' does not have information about aliases def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'): return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options) # Return macaddress instead of lladdr def parse_lladdr_line(self, words, current_if, ips): current_if['macaddress'] = words[1] current_if['type'] = 'ether' class OpenBSDNetworkCollector(NetworkCollector): _fact_class = OpenBSDNetwork _platform = 'OpenBSD'
1,547
Python
.py
33
43.575758
95
0.764276
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,266
python.py
ansible_ansible/lib/ansible/module_utils/facts/system/python.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import sys import ansible.module_utils.compat.typing as t from ansible.module_utils.facts.collector import BaseFactCollector try: # Check if we have SSLContext support from ssl import create_default_context, SSLContext del create_default_context del SSLContext HAS_SSLCONTEXT = True except ImportError: HAS_SSLCONTEXT = False class PythonFactCollector(BaseFactCollector): name = 'python' _fact_ids = set() # type: t.Set[str] def collect(self, module=None, collected_facts=None): python_facts = {} python_facts['python'] = { 'version': { 'major': sys.version_info[0], 'minor': sys.version_info[1], 'micro': sys.version_info[2], 'releaselevel': sys.version_info[3], 'serial': sys.version_info[4] }, 'version_info': list(sys.version_info), 'executable': sys.executable, 'has_sslcontext': HAS_SSLCONTEXT } try: python_facts['python']['type'] = sys.subversion[0] except AttributeError: try: python_facts['python']['type'] = sys.implementation.name except AttributeError: python_facts['python']['type'] = None return python_facts
2,014
Python
.py
51
32.568627
72
0.663082
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
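PythonFactCollector.collect() above builds its fact dict straight from sys; the same assembly, minus the module plumbing, as a runnable sketch for whatever interpreter executes it:

# Sketch of the version facts assembled by PythonFactCollector.
import sys

python_fact = {
    'version': dict(zip(
        ('major', 'minor', 'micro', 'releaselevel', 'serial'),
        sys.version_info)),
    'version_info': list(sys.version_info),
    'executable': sys.executable,
    # sys.subversion is long gone; modern interpreters expose the name here
    'type': getattr(sys.implementation, 'name', None),
}
print(python_fact['version']['major'], python_fact['type'])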
14,267
local.py
ansible_ansible/lib/ansible/module_utils/facts/system/local.py
# Copyright: Contributors to the Ansible project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations import glob import json import os import stat import ansible.module_utils.compat.typing as t from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.facts.utils import get_file_content from ansible.module_utils.facts.collector import BaseFactCollector from ansible.module_utils.six.moves import configparser, StringIO class LocalFactCollector(BaseFactCollector): name = 'local' _fact_ids = set() # type: t.Set[str] def collect(self, module=None, collected_facts=None): local_facts = {} local_facts['local'] = {} if not module: return local_facts fact_path = module.params.get('fact_path', None) if not fact_path or not os.path.exists(fact_path): return local_facts local = {} # go over .fact files, run executables, read rest, skip bad with warning and note for fn in sorted(glob.glob(fact_path + '/*.fact')): # use filename for key where it will sit under local facts fact_base = os.path.basename(fn).replace('.fact', '') failed = None try: executable_fact = stat.S_IXUSR & os.stat(fn)[stat.ST_MODE] except OSError as e: failed = 'Could not stat fact (%s): %s' % (fn, to_text(e)) local[fact_base] = failed module.warn(failed) continue if executable_fact: try: # run it rc, out, err = module.run_command(fn) if rc != 0: failed = 'Failure executing fact script (%s), rc: %s, err: %s' % (fn, rc, err) except (IOError, OSError) as e: failed = 'Could not execute fact script (%s): %s' % (fn, to_text(e)) if failed is not None: local[fact_base] = failed module.warn(failed) continue else: # ignores exceptions and returns empty out = get_file_content(fn, default='') try: # ensure we have unicode out = to_text(out, errors='surrogate_or_strict') except UnicodeError: fact = 'error loading fact - output of running "%s" was not utf-8' % fn local[fact_base] = fact module.warn(fact) continue # try to read it as json first try: fact = json.loads(out) except ValueError: # if that fails read it with ConfigParser cp = configparser.ConfigParser() try: cp.read_file(StringIO(out)) except configparser.Error: fact = f"error loading facts as JSON or ini - please check content: {fn}" module.warn(fact) else: fact = {} for sect in cp.sections(): if sect not in fact: fact[sect] = {} for opt in cp.options(sect): try: val = cp.get(sect, opt) except configparser.Error as ex: fact = f"error loading facts as ini - please check content: {fn} ({ex})" module.warn(fact) continue else: fact[sect][opt] = val except Exception as e: fact = "Failed to convert (%s) to JSON: %s" % (fn, to_text(e)) module.warn(fact) local[fact_base] = fact local_facts['local'] = local return local_facts
4,045
Python
.py
90
29.855556
104
0.513074
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
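The heart of LocalFactCollector.collect() above is its JSON-first, ini-fallback parse of each .fact file's text; a condensed sketch of just that step (the sample inputs are invented):

# Sketch of the JSON-then-ini parsing in LocalFactCollector.
import json
from configparser import ConfigParser, Error as IniError
from io import StringIO

def parse_fact(text):
    try:
        return json.loads(text)
    except ValueError:
        # not JSON: retry as an ini file, as the collector above does
        cp = ConfigParser()
        try:
            cp.read_file(StringIO(text))
        except IniError:
            return 'error loading facts as JSON or ini'
        return {sect: dict(cp.items(sect)) for sect in cp.sections()}

print(parse_fact('{"answer": 42}'))            # {'answer': 42}
print(parse_fact('[general]\nanswer = 42\n'))  # {'general': {'answer': '42'}}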
14,268
distribution.py
ansible_ansible/lib/ansible/module_utils/facts/system/distribution.py
# -*- coding: utf-8 -*-
# Copyright: (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations

import os
import platform
import re

import ansible.module_utils.compat.typing as t

from ansible.module_utils.common.sys_info import get_distribution, get_distribution_version, \
    get_distribution_codename
from ansible.module_utils.facts.utils import get_file_content, get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector


def get_uname(module, flags=('-v')):
    if isinstance(flags, str):
        flags = flags.split()
    command = ['uname']
    command.extend(flags)
    rc, out, err = module.run_command(command)
    if rc == 0:
        return out
    return None


def _file_exists(path, allow_empty=False):
    # not finding the file, exit early
    if not os.path.isfile(path):
        return False

    # if just the path needs to exist (i.e. it can be empty) we are done
    if allow_empty:
        return True

    # file exists but is empty and we don't allow_empty
    if os.path.getsize(path) == 0:
        return False

    # file exists with some content
    return True


class DistributionFiles:
    """has-a various distro file parsers (os-release, etc) and logic for finding the right one."""

    # every distribution name mentioned here, must have one of
    #  - allowempty == True
    #  - be listed in SEARCH_STRING
    #  - have a function get_distribution_DISTNAME implemented
    # keep names in sync with Conditionals page of docs
    OSDIST_LIST = (
        {'path': '/etc/altlinux-release', 'name': 'Altlinux'},
        {'path': '/etc/oracle-release', 'name': 'OracleLinux'},
        {'path': '/etc/slackware-version', 'name': 'Slackware'},
        {'path': '/etc/centos-release', 'name': 'CentOS'},
        {'path': '/etc/redhat-release', 'name': 'RedHat'},
        {'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
        {'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
        {'path': '/etc/os-release', 'name': 'Amazon'},
        {'path': '/etc/system-release', 'name': 'Amazon'},
        {'path': '/etc/alpine-release', 'name': 'Alpine'},
        {'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
        {'path': '/etc/os-release', 'name': 'Archlinux'},
        {'path': '/etc/os-release', 'name': 'SUSE'},
        {'path': '/etc/SuSE-release', 'name': 'SUSE'},
        {'path': '/etc/gentoo-release', 'name': 'Gentoo'},
        {'path': '/etc/os-release', 'name': 'Debian'},
        {'path': '/etc/lsb-release', 'name': 'Debian'},
        {'path': '/etc/lsb-release', 'name': 'Mandriva'},
        {'path': '/etc/sourcemage-release', 'name': 'SMGL'},
        {'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
        {'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
        {'path': '/etc/os-release', 'name': 'Flatcar'},
        {'path': '/etc/os-release', 'name': 'NA'},
    )

    SEARCH_STRING = {
        'OracleLinux': 'Oracle Linux',
        'RedHat': 'Red Hat',
        'Altlinux': 'ALT',
        'SMGL': 'Source Mage GNU/Linux',
    }

    # We can't include this in SEARCH_STRING because a name match on its keys
    # causes a fallback to using the first whitespace separated item from the file content
    # as the name. For os-release, that is in form 'NAME=Arch'
    OS_RELEASE_ALIAS = {
        'Archlinux': 'Arch Linux'
    }

    STRIP_QUOTES = r'\'\"\\'

    def __init__(self, module):
        self.module = module

    def _get_file_content(self, path):
        return get_file_content(path)

    def _get_dist_file_content(self, path, allow_empty=False):
        # can't find that dist file or it is incorrectly empty
        if not _file_exists(path, allow_empty=allow_empty):
            return False, None

        data = self._get_file_content(path)
        return True, data

    def _parse_dist_file(self, name, dist_file_content, path, collected_facts):
        dist_file_dict = {}
        dist_file_content = dist_file_content.strip(DistributionFiles.STRIP_QUOTES)
        if name in self.SEARCH_STRING:
            # look for the distribution string in the data and replace according to RELEASE_NAME_MAP
            # only the distribution name is set, the version is assumed to be correct from distro.linux_distribution()
            if self.SEARCH_STRING[name] in dist_file_content:
                # this sets distribution=RedHat if 'Red Hat' shows up in data
                dist_file_dict['distribution'] = name
                dist_file_dict['distribution_file_search_string'] = self.SEARCH_STRING[name]
            else:
                # this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
                dist_file_dict['distribution'] = dist_file_content.split()[0]

            return True, dist_file_dict

        if name in self.OS_RELEASE_ALIAS:
            if self.OS_RELEASE_ALIAS[name] in dist_file_content:
                dist_file_dict['distribution'] = name
                return True, dist_file_dict
            return False, dist_file_dict

        # call a dedicated function for parsing the file content
        # TODO: replace with a map or a class
        try:
            # FIXME: most of these don't actually look at the dist file contents, but random other stuff
            distfunc_name = 'parse_distribution_file_' + name
            distfunc = getattr(self, distfunc_name)
            parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
            return parsed, dist_file_dict
        except AttributeError as exc:
            self.module.debug('exc: %s' % exc)
            # this should never happen, but if it does fail quietly and not with a traceback
            return False, dist_file_dict

        return True, dist_file_dict

        # to debug multiple matching release files, one can use:
        # self.facts['distribution_debug'].append({path + ' ' + name:
        #                                         (parsed,
        #                                          self.facts['distribution'],
        #                                          self.facts['distribution_version'],
        #                                          self.facts['distribution_release'],
        #                                          )})

    def _guess_distribution(self):
        # try to find out which linux distribution this is
        dist = (get_distribution(), get_distribution_version(), get_distribution_codename())
        distribution_guess = {
            'distribution': dist[0] or 'NA',
            'distribution_version': dist[1] or 'NA',
            # distribution_release can be the empty string
            'distribution_release': 'NA' if dist[2] is None else dist[2]
        }

        distribution_guess['distribution_major_version'] = distribution_guess['distribution_version'].split('.')[0] or 'NA'
        return distribution_guess

    def process_dist_files(self):
        # Try to handle the exceptions now ...
        # self.facts['distribution_debug'] = []
        dist_file_facts = {}

        dist_guess = self._guess_distribution()
        dist_file_facts.update(dist_guess)

        for ddict in self.OSDIST_LIST:
            name = ddict['name']
            path = ddict['path']
            allow_empty = ddict.get('allowempty', False)

            has_dist_file, dist_file_content = self._get_dist_file_content(path, allow_empty=allow_empty)

            # but we allow_empty. For example, ArchLinux with an empty /etc/arch-release and a
            # /etc/os-release with a different name
            if has_dist_file and allow_empty:
                dist_file_facts['distribution'] = name
                dist_file_facts['distribution_file_path'] = path
                dist_file_facts['distribution_file_variety'] = name
                break

            if not has_dist_file:
                # keep looking
                continue

            parsed_dist_file, parsed_dist_file_facts = self._parse_dist_file(name, dist_file_content, path, dist_file_facts)

            # finally found the right os dist file and were able to parse it
            if parsed_dist_file:
                dist_file_facts['distribution'] = name
                dist_file_facts['distribution_file_path'] = path
                # distribution and file_variety are the same here, but distribution
                # will be changed/mapped to a more specific name.
                # ie, dist=Fedora, file_variety=RedHat
                dist_file_facts['distribution_file_variety'] = name
                dist_file_facts['distribution_file_parsed'] = parsed_dist_file
                dist_file_facts.update(parsed_dist_file_facts)
                break

        return dist_file_facts

    # TODO: FIXME: split distro file parsing into its own module or class
    def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
        slackware_facts = {}
        if 'Slackware' not in data:
            return False, slackware_facts  # TODO: remove
        slackware_facts['distribution'] = name
        version = re.findall(r'\w+[.]\w+\+?', data)
        if version:
            slackware_facts['distribution_version'] = version[0]
        return True, slackware_facts

    def parse_distribution_file_Amazon(self, name, data, path, collected_facts):
        amazon_facts = {}
        if 'Amazon' not in data:
            return False, amazon_facts
        amazon_facts['distribution'] = 'Amazon'
        if path == '/etc/os-release':
            version = re.search(r"VERSION_ID=\"(.*)\"", data)
            if version:
                distribution_version = version.group(1)
                amazon_facts['distribution_version'] = distribution_version
                version_data = distribution_version.split(".")
                if len(version_data) > 1:
                    major, minor = version_data
                else:
                    major, minor = version_data[0], 'NA'

                amazon_facts['distribution_major_version'] = major
                amazon_facts['distribution_minor_version'] = minor
        else:
            version = [n for n in data.split() if n.isdigit()]
            version = version[0] if version else 'NA'
            amazon_facts['distribution_version'] = version

        return True, amazon_facts

    def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
        openwrt_facts = {}
        if 'OpenWrt' not in data:
            return False, openwrt_facts  # TODO: remove
        openwrt_facts['distribution'] = name
        version = re.search('DISTRIB_RELEASE="(.*)"', data)
        if version:
            openwrt_facts['distribution_version'] = version.groups()[0]
        release = re.search('DISTRIB_CODENAME="(.*)"', data)
        if release:
            openwrt_facts['distribution_release'] = release.groups()[0]
        return True, openwrt_facts

    def parse_distribution_file_Alpine(self, name, data, path, collected_facts):
        alpine_facts = {}
        alpine_facts['distribution'] = 'Alpine'
        alpine_facts['distribution_version'] = data
        return True, alpine_facts

    def parse_distribution_file_SUSE(self, name, data, path, collected_facts):
        suse_facts = {}
        if 'suse' not in data.lower():
            return False, suse_facts  # TODO: remove if tested without this
        if path == '/etc/os-release':
            for line in data.splitlines():
                distribution = re.search("^NAME=(.*)", line)
                if distribution:
                    suse_facts['distribution'] = distribution.group(1).strip('"')
                # example patterns are 13.04 13.0 13
                distribution_version = re.search(r'^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
                if distribution_version:
                    suse_facts['distribution_version'] = distribution_version.group(1)
                    suse_facts['distribution_major_version'] = distribution_version.group(1).split('.')[0]
                if 'open' in data.lower():
                    release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
                    if release:
                        suse_facts['distribution_release'] = release.groups()[0]
                elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                    # SLES doesn't have funny release names
                    release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
                    if release.group(1):
                        release = release.group(1)
                    else:
                        release = "0"  # no minor number, so it is the first release
                    suse_facts['distribution_release'] = release
        elif path == '/etc/SuSE-release':
            if 'open' in data.lower():
                data = data.splitlines()
                distdata = get_file_content(path).splitlines()[0]
                suse_facts['distribution'] = distdata.split()[0]
                for line in data:
                    release = re.search('CODENAME *= *([^\n]+)', line)
                    if release:
                        suse_facts['distribution_release'] = release.groups()[0].strip()
            elif 'enterprise' in data.lower():
                lines = data.splitlines()
                distribution = lines[0].split()[0]
                if "Server" in data:
                    suse_facts['distribution'] = "SLES"
                elif "Desktop" in data:
                    suse_facts['distribution'] = "SLED"
                for line in lines:
                    release = re.search('PATCHLEVEL = ([0-9]+)', line)  # SLES doesn't have funny release names
                    if release:
                        suse_facts['distribution_release'] = release.group(1)
                        suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)

        # See https://www.suse.com/support/kb/doc/?id=000019341 for SLES for SAP
        if os.path.islink('/etc/products.d/baseproduct') and os.path.realpath('/etc/products.d/baseproduct').endswith('SLES_SAP.prod'):
            suse_facts['distribution'] = 'SLES_SAP'

        return True, suse_facts

    def parse_distribution_file_Debian(self, name, data, path, collected_facts):
        debian_facts = {}
        if 'Debian' in data or 'Raspbian' in data:
            debian_facts['distribution'] = 'Debian'
            release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
            if release:
                debian_facts['distribution_release'] = release.groups()[0]

            # Last resort: try to find release from tzdata as either lsb is missing or this is very old debian
            if collected_facts['distribution_release'] == 'NA' and 'Debian' in data:
                dpkg_cmd = self.module.get_bin_path('dpkg')
                if dpkg_cmd:
                    cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
                    rc, out, err = self.module.run_command(cmd)
                    if rc == 0:
                        debian_facts['distribution_release'] = out.strip()
            debian_version_path = '/etc/debian_version'
            distdata = get_file_lines(debian_version_path)
            for line in distdata:
                m = re.search(r'(\d+)\.(\d+)', line.strip())
                if m:
                    debian_facts['distribution_minor_version'] = m.groups()[1]
        elif 'Ubuntu' in data:
            debian_facts['distribution'] = 'Ubuntu'
            # nothing else to do, Ubuntu gets correct info from python functions
        elif 'SteamOS' in data:
            debian_facts['distribution'] = 'SteamOS'
            # nothing else to do, SteamOS gets correct info from python functions
        elif path in ('/etc/lsb-release', '/etc/os-release') and ('Kali' in data or 'Parrot' in data):
            if 'Kali' in data:
                # Kali does not provide /etc/lsb-release anymore
                debian_facts['distribution'] = 'Kali'
            elif 'Parrot' in data:
                debian_facts['distribution'] = 'Parrot'
            release = re.search('DISTRIB_RELEASE=(.*)', data)
            if release:
                debian_facts['distribution_release'] = release.groups()[0]
        elif 'Devuan' in data:
            debian_facts['distribution'] = 'Devuan'
            release = re.search(r"PRETTY_NAME=\"?[^(\"]+ \(?([^) \"]+)\)?", data)
            if release:
                debian_facts['distribution_release'] = release.groups()[0]
            version = re.search(r"VERSION_ID=\"(.*)\"", data)
            if version:
                debian_facts['distribution_version'] = version.group(1)
                debian_facts['distribution_major_version'] = version.group(1)
        elif 'Cumulus' in data:
            debian_facts['distribution'] = 'Cumulus Linux'
            version = re.search(r"VERSION_ID=(.*)", data)
            if version:
                major, _minor, _dummy_ver = version.group(1).split(".")
                debian_facts['distribution_version'] = version.group(1)
                debian_facts['distribution_major_version'] = major

            release = re.search(r'VERSION="(.*)"', data)
            if release:
                debian_facts['distribution_release'] = release.groups()[0]
        elif "Mint" in data:
            debian_facts['distribution'] = 'Linux Mint'
            version = re.search(r"VERSION_ID=\"(.*)\"", data)
            if version:
                debian_facts['distribution_version'] = version.group(1)
                debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
        elif 'UOS' in data or 'Uos' in data or 'uos' in data:
            debian_facts['distribution'] = 'Uos'
            release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
            if release:
                debian_facts['distribution_release'] = release.groups()[0]
            version = re.search(r"VERSION_ID=\"(.*)\"", data)
            if version:
                debian_facts['distribution_version'] = version.group(1)
                debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
        elif 'Deepin' in data or 'deepin' in data:
            debian_facts['distribution'] = 'Deepin'
            release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
            if release:
                debian_facts['distribution_release'] = release.groups()[0]
            version = re.search(r"VERSION_ID=\"(.*)\"", data)
            if version:
                debian_facts['distribution_version'] = version.group(1)
                debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
        else:
            return False, debian_facts

        return True, debian_facts

    def parse_distribution_file_Mandriva(self, name, data, path, collected_facts):
        mandriva_facts = {}
        if 'Mandriva' in data:
            mandriva_facts['distribution'] = 'Mandriva'
            version = re.search('DISTRIB_RELEASE="(.*)"', data)
            if version:
                mandriva_facts['distribution_version'] = version.groups()[0]
            release = re.search('DISTRIB_CODENAME="(.*)"', data)
            if release:
                mandriva_facts['distribution_release'] = release.groups()[0]
            mandriva_facts['distribution'] = name
        else:
            return False, mandriva_facts

        return True, mandriva_facts

    def parse_distribution_file_NA(self, name, data, path, collected_facts):
        na_facts = {}
        for line in data.splitlines():
            distribution = re.search("^NAME=(.*)", line)
            if distribution and name == 'NA':
                na_facts['distribution'] = distribution.group(1).strip('"')
            version = re.search("^VERSION=(.*)", line)
            if version and collected_facts['distribution_version'] == 'NA':
                na_facts['distribution_version'] = version.group(1).strip('"')
        return True, na_facts

    def parse_distribution_file_Coreos(self, name, data, path, collected_facts):
        coreos_facts = {}
        # FIXME: pass in ro copy of facts for this kind of thing
        distro = get_distribution()

        if distro.lower() == 'coreos':
            if not data:
                # include fix from #15230, #15228
                # TODO: verify this is ok for above bugs
                return False, coreos_facts
            release = re.search("^GROUP=(.*)", data)
            if release:
                coreos_facts['distribution_release'] = release.group(1).strip('"')
        else:
            return False, coreos_facts  # TODO: remove if tested without this

        return True, coreos_facts

    def parse_distribution_file_Flatcar(self, name, data, path, collected_facts):
        flatcar_facts = {}
        distro = get_distribution()

        if distro.lower() != 'flatcar':
            return False, flatcar_facts

        if not data:
            return False, flatcar_facts

        version = re.search("VERSION=(.*)", data)
        if version:
            flatcar_facts['distribution_major_version'] = version.group(1).strip('"').split('.')[0]
            flatcar_facts['distribution_version'] = version.group(1).strip('"')

        return True, flatcar_facts

    def parse_distribution_file_ClearLinux(self, name, data, path, collected_facts):
        clear_facts = {}
        if "clearlinux" not in name.lower():
            return False, clear_facts

        pname = re.search('NAME="(.*)"', data)
        if pname:
            if 'Clear Linux' not in pname.groups()[0]:
                return False, clear_facts
            clear_facts['distribution'] = pname.groups()[0]
        version = re.search('VERSION_ID=(.*)', data)
        if version:
            clear_facts['distribution_major_version'] = version.groups()[0]
            clear_facts['distribution_version'] = version.groups()[0]
        release = re.search('ID=(.*)', data)
        if release:
            clear_facts['distribution_release'] = release.groups()[0]
        return True, clear_facts

    def parse_distribution_file_CentOS(self, name, data, path, collected_facts):
        centos_facts = {}

        if 'CentOS Stream' in data:
            centos_facts['distribution_release'] = 'Stream'
            return True, centos_facts

        if "TencentOS Server" in data:
            centos_facts['distribution'] = 'TencentOS'
            return True, centos_facts

        return False, centos_facts


class Distribution(object):
    """
    This subclass of Facts fills the distribution, distribution_version and distribution_release variables

    To do so it checks the existence and content of typical files in /etc containing distribution information

    This is unit tested. Please extend the tests to cover all distributions if you have them available.
    """

    # keep keys in sync with Conditionals page of docs
    OS_FAMILY_MAP = {'RedHat': ['RedHat', 'RHEL', 'Fedora', 'CentOS', 'Scientific', 'SLC',
                                'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
                                'OEL', 'Amazon', 'Amzn', 'Virtuozzo', 'XenServer', 'Alibaba',
                                'EulerOS', 'openEuler', 'AlmaLinux', 'Rocky', 'TencentOS',
                                'EuroLinux', 'Kylin Linux Advanced Server', 'MIRACLE'],
                     'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
                                'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
                                'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC'],
                     'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
                              'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite', 'SL-Micro'],
                     'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
                     'Mandrake': ['Mandrake', 'Mandriva'],
                     'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
                     'Slackware': ['Slackware'],
                     'Altlinux': ['Altlinux'],
                     'SMGL': ['SMGL'],
                     'Gentoo': ['Gentoo', 'Funtoo'],
                     'Alpine': ['Alpine'],
                     'AIX': ['AIX'],
                     'HP-UX': ['HPUX'],
                     'Darwin': ['MacOSX'],
                     'FreeBSD': ['FreeBSD', 'TrueOS'],
                     'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix'],
                     'DragonFly': ['DragonflyBSD', 'DragonFlyBSD', 'Gentoo/DragonflyBSD', 'Gentoo/DragonFlyBSD'],
                     'NetBSD': ['NetBSD'], }

    OS_FAMILY = {}
    for family, names in OS_FAMILY_MAP.items():
        for name in names:
            OS_FAMILY[name] = family

    def __init__(self, module):
        self.module = module

    def get_distribution_facts(self):
        distribution_facts = {}

        # The platform module provides information about the running
        # system/distribution. Use this as a baseline and fix buggy systems
        # afterwards
        system = platform.system()
        distribution_facts['distribution'] = system
        distribution_facts['distribution_release'] = platform.release()
        distribution_facts['distribution_version'] = platform.version()

        systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')

        if system in systems_implemented:
            cleanedname = system.replace('-', '')
            distfunc = getattr(self, 'get_distribution_' + cleanedname)
            dist_func_facts = distfunc()
            distribution_facts.update(dist_func_facts)
        elif system == 'Linux':

            distribution_files = DistributionFiles(module=self.module)

            # linux_distribution_facts = LinuxDistribution(module).get_distribution_facts()
            dist_file_facts = distribution_files.process_dist_files()

            distribution_facts.update(dist_file_facts)

        distro = distribution_facts['distribution']

        # look for an os family alias for the 'distribution', if there isn't one, use 'distribution'
        distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro

        return distribution_facts

    def get_distribution_AIX(self):
        aix_facts = {}
        rc, out, err = self.module.run_command("/usr/bin/oslevel")
        data = out.split('.')
        aix_facts['distribution_major_version'] = data[0]
        if len(data) > 1:
            aix_facts['distribution_version'] = '%s.%s' % (data[0], data[1])
            aix_facts['distribution_release'] = data[1]
        else:
            aix_facts['distribution_version'] = data[0]
        return aix_facts

    def get_distribution_HPUX(self):
        hpux_facts = {}
        rc, out, err = self.module.run_command(r"/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
        data = re.search(r'HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
        if data:
            hpux_facts['distribution_version'] = data.groups()[0]
            hpux_facts['distribution_release'] = data.groups()[1]
        return hpux_facts

    def get_distribution_Darwin(self):
        darwin_facts = {}
        darwin_facts['distribution'] = 'MacOSX'
        rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
        data = out.split()[-1]
        if data:
            darwin_facts['distribution_major_version'] = data.split('.')[0]
            darwin_facts['distribution_version'] = data
        return darwin_facts

    def get_distribution_FreeBSD(self):
        freebsd_facts = {}
        freebsd_facts['distribution_release'] = platform.release()
        data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT|RC|PRERELEASE).*', freebsd_facts['distribution_release'])
        if 'trueos' in platform.version():
            freebsd_facts['distribution'] = 'TrueOS'
        if data:
            freebsd_facts['distribution_major_version'] = data.group(1)
            freebsd_facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
        return freebsd_facts

    def get_distribution_OpenBSD(self):
        openbsd_facts = {}
        openbsd_facts['distribution_version'] = platform.release()
        rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
        match = re.match(r'OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
        if match:
            openbsd_facts['distribution_release'] = match.groups()[0]
        else:
            openbsd_facts['distribution_release'] = 'release'
        return openbsd_facts

    def get_distribution_DragonFly(self):
        dragonfly_facts = {
            'distribution_release': platform.release()
        }
        rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
        match = re.search(r'v(\d+)\.(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', out)
        if match:
            dragonfly_facts['distribution_major_version'] = match.group(1)
            dragonfly_facts['distribution_version'] = '%s.%s.%s' % match.groups()[:3]
        return dragonfly_facts

    def get_distribution_NetBSD(self):
        netbsd_facts = {}
        platform_release = platform.release()
        netbsd_facts['distribution_release'] = platform_release
        rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
        match = re.match(r'NetBSD\s(\d+)\.(\d+)\s\((GENERIC)\).*', out)
        if match:
            netbsd_facts['distribution_major_version'] = match.group(1)
            netbsd_facts['distribution_version'] = '%s.%s' % match.groups()[:2]
        else:
            netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
            netbsd_facts['distribution_version'] = platform_release
        return netbsd_facts

    def get_distribution_SMGL(self):
        smgl_facts = {}
        smgl_facts['distribution'] = 'Source Mage GNU/Linux'
        return smgl_facts

    def get_distribution_SunOS(self):
        sunos_facts = {}

        data = get_file_content('/etc/release').splitlines()[0]

        if 'Solaris' in data:
            # for solaris 10 uname_r will contain 5.10, for solaris 11 it will have 5.11
            uname_r = get_uname(self.module, flags=['-r'])
            ora_prefix = ''
            if 'Oracle Solaris' in data:
                data = data.replace('Oracle ', '')
                ora_prefix = 'Oracle '
            sunos_facts['distribution'] = data.split()[0]
            sunos_facts['distribution_version'] = data.split()[1]
            sunos_facts['distribution_release'] = ora_prefix + data
            sunos_facts['distribution_major_version'] = uname_r.split('.')[1].rstrip()
            return sunos_facts

        uname_v = get_uname(self.module, flags=['-v'])
        distribution_version = None

        if 'SmartOS' in data:
            sunos_facts['distribution'] = 'SmartOS'
            if _file_exists('/etc/product'):
                product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
                if 'Image' in product_data:
                    distribution_version = product_data.get('Image').split()[-1]
        elif 'OpenIndiana' in data:
            sunos_facts['distribution'] = 'OpenIndiana'
        elif 'OmniOS' in data:
            sunos_facts['distribution'] = 'OmniOS'
            distribution_version = data.split()[-1]
        elif uname_v is not None and 'NexentaOS_' in uname_v:
            sunos_facts['distribution'] = 'Nexenta'
            distribution_version = data.split()[-1].lstrip('v')

        if sunos_facts.get('distribution', '') in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
            sunos_facts['distribution_release'] = data.strip()
            if distribution_version is not None:
                sunos_facts['distribution_version'] = distribution_version
            elif uname_v is not None:
                sunos_facts['distribution_version'] = uname_v.splitlines()[0].strip()
            return sunos_facts

        return sunos_facts


class DistributionFactCollector(BaseFactCollector):
    name = 'distribution'
    _fact_ids = set(['distribution_version',
                     'distribution_release',
                     'distribution_major_version',
                     'os_family'])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        collected_facts = collected_facts or {}
        facts_dict = {}
        if not module:
            return facts_dict

        distribution = Distribution(module=module)
        distro_facts = distribution.get_distribution_facts()

        return distro_facts
32,759
Python
.py
621
40.805153
135
0.578698
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
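
Inside Ansible, the collector above is driven by AnsibleModule, which supplies the run_command(), get_bin_path() and debug() methods it calls. As a rough illustration only, the same collect() call can be exercised standalone with a hypothetical FakeModule stub (not part of the file above; the import path assumes the file's usual in-tree location under ansible/module_utils/facts/system/):

import shutil
import subprocess

from ansible.module_utils.facts.system.distribution import DistributionFactCollector


class FakeModule:
    # hypothetical stand-in for AnsibleModule; implements only the three
    # methods the distribution collector actually uses
    def run_command(self, cmd, use_unsafe_shell=False):
        # mirror run_command's (rc, stdout, stderr) contract
        shell = use_unsafe_shell or isinstance(cmd, str)
        proc = subprocess.run(cmd, shell=shell, capture_output=True, text=True)
        return proc.returncode, proc.stdout, proc.stderr

    def get_bin_path(self, name):
        return shutil.which(name)

    def debug(self, msg):
        pass


print(DistributionFactCollector().collect(module=FakeModule()))
# e.g. {'distribution': 'Ubuntu', 'distribution_version': '22.04', ..., 'os_family': 'Debian'}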
14,269
user.py
ansible_ansible/lib/ansible/module_utils/facts/system/user.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import getpass
import os
import pwd

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector


class UserFactCollector(BaseFactCollector):
    name = 'user'
    _fact_ids = set(['user_id', 'user_uid', 'user_gid',
                     'user_gecos', 'user_dir', 'user_shell',
                     'real_user_id', 'effective_user_id',
                     'effective_group_ids'])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        user_facts = {}

        user_facts['user_id'] = getpass.getuser()

        try:
            pwent = pwd.getpwnam(getpass.getuser())
        except KeyError:
            pwent = pwd.getpwuid(os.getuid())

        user_facts['user_uid'] = pwent.pw_uid
        user_facts['user_gid'] = pwent.pw_gid
        user_facts['user_gecos'] = pwent.pw_gecos
        user_facts['user_dir'] = pwent.pw_dir
        user_facts['user_shell'] = pwent.pw_shell

        user_facts['real_user_id'] = os.getuid()
        user_facts['effective_user_id'] = os.geteuid()
        user_facts['real_group_id'] = os.getgid()
        user_facts['effective_group_id'] = os.getgid()

        return user_facts
1,891
Python
.py
43
37.813953
70
0.670659
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,270
service_mgr.py
ansible_ansible/lib/ansible/module_utils/facts/system/service_mgr.py
# Collect facts related to system service manager and init.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os
import platform
import re

import ansible.module_utils.compat.typing as t

from ansible.module_utils.common.text.converters import to_native

from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector

# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes.  Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
    from ansible.module_utils.compat.version import LooseVersion


class ServiceMgrFactCollector(BaseFactCollector):
    name = 'service_mgr'
    _fact_ids = set()  # type: t.Set[str]
    required_facts = set(['platform', 'distribution'])

    @staticmethod
    def is_systemd_managed(module):
        # tools must be installed
        if module.get_bin_path('systemctl'):
            # this should show if systemd is the boot init system, if checking init failed to mark as systemd
            # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
            for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
                if os.path.exists(canary):
                    return True
        return False

    @staticmethod
    def is_systemd_managed_offline(module):
        # tools must be installed
        if module.get_bin_path('systemctl'):
            # check if /sbin/init is a symlink to systemd
            # on SUSE, /sbin/init may be missing if systemd-sysvinit package is not installed.
            if os.path.islink('/sbin/init') and os.path.basename(os.readlink('/sbin/init')) == 'systemd':
                return True
        return False

    def collect(self, module=None, collected_facts=None):
        facts_dict = {}

        if not module:
            return facts_dict

        collected_facts = collected_facts or {}
        service_mgr_name = None

        # TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
        # also OSs other than linux might need to check across several possible candidates

        # Mapping of proc_1 values to more useful names
        proc_1_map = {
            'procd': 'openwrt_init',
            'runit-init': 'runit',
            'svscan': 'svc',
            'openrc-init': 'openrc',
        }

        # try various forms of querying pid 1
        proc_1 = get_file_content('/proc/1/comm')
        if proc_1 is None:
            rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)

            # if command fails, or stdout is empty string or the output of the command starts with what looks like a PID,
            # then the 'ps' command probably didn't work the way we wanted, probably because it's busybox
            if rc != 0 or not proc_1.strip() or re.match(r' *[0-9]+ ', proc_1):
                proc_1 = None

        # The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
        if proc_1 == "COMMAND\n":
            proc_1 = None

        if proc_1 is None and os.path.islink('/sbin/init'):
            proc_1 = os.readlink('/sbin/init')

        if proc_1 is not None:
            proc_1 = os.path.basename(proc_1)
            proc_1 = to_native(proc_1)
            proc_1 = proc_1.strip()

        if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
            # many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container
            proc_1 = None

        # if not init/None it should be an identifiable or custom init, so we are done!
        if proc_1 is not None:
            # Lookup proc_1 value in map and use proc_1 value itself if no match
            service_mgr_name = proc_1_map.get(proc_1, proc_1)

        # start with the easy ones
        elif collected_facts.get('ansible_distribution', None) == 'MacOSX':
            # FIXME: find way to query executable, version matching is not ideal
            if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
                service_mgr_name = 'launchd'
            else:
                service_mgr_name = 'systemstarter'
        elif 'BSD' in collected_facts.get('ansible_system', '') or collected_facts.get('ansible_system') in ['Bitrig', 'DragonFly']:
            # FIXME: we might want to break out to individual BSDs or 'rc'
            service_mgr_name = 'bsdinit'
        elif collected_facts.get('ansible_system') == 'AIX':
            service_mgr_name = 'src'
        elif collected_facts.get('ansible_system') == 'SunOS':
            service_mgr_name = 'smf'
        elif collected_facts.get('ansible_distribution') == 'OpenWrt':
            service_mgr_name = 'openwrt_init'
        elif collected_facts.get('ansible_distribution') == 'SMGL':
            service_mgr_name = 'simpleinit_msb'
        elif collected_facts.get('ansible_system') == 'Linux':
            # FIXME: mv is_systemd_managed
            if self.is_systemd_managed(module=module):
                service_mgr_name = 'systemd'
            elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
                service_mgr_name = 'upstart'
            elif os.path.exists('/sbin/openrc'):
                service_mgr_name = 'openrc'
            elif self.is_systemd_managed_offline(module=module):
                service_mgr_name = 'systemd'
            elif os.path.exists('/etc/init.d/'):
                service_mgr_name = 'sysvinit'
            elif os.path.exists('/etc/dinit.d/'):
                service_mgr_name = 'dinit'

        if not service_mgr_name:
            # if we cannot detect, fallback to generic 'service'
            service_mgr_name = 'service'

        facts_dict['service_mgr'] = service_mgr_name
        return facts_dict
6,695
Python
.py
129
42.75969
132
0.639297
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
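
Most of the detection above reduces to identifying pid 1. The first probe the collector tries can be reproduced directly; a minimal sketch using the same helper and the same file the collector reads (Linux only):

from ansible.module_utils.facts.utils import get_file_content

# get_file_content strips trailing whitespace by default and returns None
# when the file is missing or unreadable
proc_1 = get_file_content('/proc/1/comm')
print(proc_1)  # e.g. 'systemd', 'init', or None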
14,271
cmdline.py
ansible_ansible/lib/ansible/module_utils/facts/system/cmdline.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import shlex

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector


class CmdLineFactCollector(BaseFactCollector):
    name = 'cmdline'
    _fact_ids = set()  # type: t.Set[str]

    def _get_proc_cmdline(self):
        return get_file_content('/proc/cmdline')

    def _parse_proc_cmdline(self, data):
        cmdline_dict = {}
        try:
            for piece in shlex.split(data, posix=False):
                item = piece.split('=', 1)
                if len(item) == 1:
                    cmdline_dict[item[0]] = True
                else:
                    cmdline_dict[item[0]] = item[1]
        except ValueError:
            pass

        return cmdline_dict

    def _parse_proc_cmdline_facts(self, data):
        cmdline_dict = {}
        try:
            for piece in shlex.split(data, posix=False):
                item = piece.split('=', 1)
                if len(item) == 1:
                    cmdline_dict[item[0]] = True
                else:
                    if item[0] in cmdline_dict:
                        if isinstance(cmdline_dict[item[0]], list):
                            cmdline_dict[item[0]].append(item[1])
                        else:
                            new_list = [cmdline_dict[item[0]], item[1]]
                            cmdline_dict[item[0]] = new_list
                    else:
                        cmdline_dict[item[0]] = item[1]
        except ValueError:
            pass

        return cmdline_dict

    def collect(self, module=None, collected_facts=None):
        cmdline_facts = {}

        data = self._get_proc_cmdline()

        if not data:
            return cmdline_facts

        cmdline_facts['cmdline'] = self._parse_proc_cmdline(data)
        cmdline_facts['proc_cmdline'] = self._parse_proc_cmdline_facts(data)

        return cmdline_facts
2,637
Python
.py
63
31.761905
76
0.599922
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
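
The split between the cmdline and proc_cmdline facts exists because the two parsers treat repeated keys differently. A small worked example (the boot line is made up for illustration):

from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector

c = CmdLineFactCollector()
data = 'BOOT_IMAGE=/vmlinuz root=/dev/sda1 ro console=tty0 console=ttyS0,115200'

c._parse_proc_cmdline(data)
# {'BOOT_IMAGE': '/vmlinuz', 'root': '/dev/sda1', 'ro': True, 'console': 'ttyS0,115200'}
# (last value wins; valueless flags like 'ro' become True)

c._parse_proc_cmdline_facts(data)
# {'BOOT_IMAGE': '/vmlinuz', 'root': '/dev/sda1', 'ro': True, 'console': ['tty0', 'ttyS0,115200']}
# (repeated keys are accumulated into a list)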
14,272
loadavg.py
ansible_ansible/lib/ansible/module_utils/facts/system/loadavg.py
# (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations

import os

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector


class LoadAvgFactCollector(BaseFactCollector):
    name = 'loadavg'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        facts = {}
        try:
            # (0.58, 0.82, 0.98)
            loadavg = os.getloadavg()
            facts['loadavg'] = {
                '1m': loadavg[0],
                '5m': loadavg[1],
                '15m': loadavg[2]
            }
        except OSError:
            pass

        return facts
769
Python
.py
22
26.681818
92
0.599459
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,273
pkg_mgr.py
ansible_ansible/lib/ansible/module_utils/facts/system/pkg_mgr.py
# Collect facts related to the system package manager
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations

import os
import subprocess

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector

# A list of dicts.  If there is a platform with more than one
# package manager, put the preferred one last.  If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [{'path': '/usr/bin/rpm-ostree', 'name': 'atomic_container'},
            # NOTE the `path` key for dnf/dnf5 is effectively discarded when matched for Red Hat OS family,
            # special logic to infer the default `pkg_mgr` is used in `PkgMgrFactCollector._check_rh_versions()`
            # leaving them here so a list of package modules can be constructed by iterating over `name` keys
            {'path': '/usr/bin/yum', 'name': 'dnf'},
            {'path': '/usr/bin/dnf-3', 'name': 'dnf'},
            {'path': '/usr/bin/dnf5', 'name': 'dnf5'},
            {'path': '/usr/bin/apt-get', 'name': 'apt'},
            {'path': '/usr/bin/zypper', 'name': 'zypper'},
            {'path': '/usr/sbin/urpmi', 'name': 'urpmi'},
            {'path': '/usr/bin/pacman', 'name': 'pacman'},
            {'path': '/bin/opkg', 'name': 'opkg'},
            {'path': '/usr/pkg/bin/pkgin', 'name': 'pkgin'},
            {'path': '/opt/local/bin/pkgin', 'name': 'pkgin'},
            {'path': '/opt/tools/bin/pkgin', 'name': 'pkgin'},
            {'path': '/opt/local/bin/port', 'name': 'macports'},
            {'path': '/usr/local/bin/brew', 'name': 'homebrew'},
            {'path': '/opt/homebrew/bin/brew', 'name': 'homebrew'},
            {'path': '/sbin/apk', 'name': 'apk'},
            {'path': '/usr/sbin/pkg', 'name': 'pkgng'},
            {'path': '/usr/sbin/swlist', 'name': 'swdepot'},
            {'path': '/usr/bin/emerge', 'name': 'portage'},
            {'path': '/usr/sbin/pkgadd', 'name': 'svr4pkg'},
            {'path': '/usr/bin/pkg', 'name': 'pkg5'},
            {'path': '/usr/bin/xbps-install', 'name': 'xbps'},
            {'path': '/usr/local/sbin/pkg', 'name': 'pkgng'},
            {'path': '/usr/bin/swupd', 'name': 'swupd'},
            {'path': '/usr/sbin/sorcery', 'name': 'sorcery'},
            {'path': '/usr/bin/installp', 'name': 'installp'},
            ]


class OpenBSDPkgMgrFactCollector(BaseFactCollector):
    name = 'pkg_mgr'
    _fact_ids = set()  # type: t.Set[str]
    _platform = 'OpenBSD'

    def collect(self, module=None, collected_facts=None):
        return {'pkg_mgr': 'openbsd_pkg'}


# the fact ends up being 'pkg_mgr' so stick with that naming/spelling
class PkgMgrFactCollector(BaseFactCollector):
    name = 'pkg_mgr'
    _fact_ids = set()  # type: t.Set[str]
    _platform = 'Generic'
    required_facts = set(['distribution'])

    def __init__(self, *args, **kwargs):
        super(PkgMgrFactCollector, self).__init__(*args, **kwargs)
        self._default_unknown_pkg_mgr = 'unknown'

    def _check_rh_versions(self):
        if os.path.exists('/run/ostree-booted'):
            return "atomic_container"

        # Since /usr/bin/dnf and /usr/bin/microdnf can point to different versions of dnf in different distributions
        # the only way to infer the default package manager is to look at the binary they are pointing to.
        # /usr/bin/microdnf is likely used only in fedora minimal container so /usr/bin/dnf takes precedence
        for bin_path in ('/usr/bin/dnf', '/usr/bin/microdnf'):
            if os.path.exists(bin_path):
                return 'dnf5' if os.path.realpath(bin_path) == '/usr/bin/dnf5' else 'dnf'

        return self._default_unknown_pkg_mgr

    def _check_apt_flavor(self, pkg_mgr_name):
        # Check if '/usr/bin/apt' is APT-RPM or an ordinary (dpkg-based) APT.
        # There's an rpm package on Debian, so checking if /usr/bin/rpm exists
        # is not enough.  Instead ask RPM if /usr/bin/apt-get belongs to some
        # RPM package.
        rpm_query = '/usr/bin/rpm -q --whatprovides /usr/bin/apt-get'.split()
        if os.path.exists('/usr/bin/rpm'):
            with open(os.devnull, 'w') as null:
                try:
                    subprocess.check_call(rpm_query, stdout=null, stderr=null)
                    pkg_mgr_name = 'apt_rpm'
                except subprocess.CalledProcessError:
                    # No apt-get in RPM database.  Looks like Debian/Ubuntu
                    # with rpm package installed
                    pkg_mgr_name = 'apt'
        return pkg_mgr_name

    def pkg_mgrs(self, collected_facts):
        # Filter out the /usr/bin/pkg because on Altlinux it is actually the
        # perl-Package (not Solaris package manager).
        # Since pkg5 takes precedence over apt, this workaround
        # is required to select the suitable package manager on Altlinux.
        if collected_facts['ansible_os_family'] == 'Altlinux':
            return filter(lambda pkg: pkg['path'] != '/usr/bin/pkg', PKG_MGRS)
        else:
            return PKG_MGRS

    def collect(self, module=None, collected_facts=None):
        collected_facts = collected_facts or {}

        pkg_mgr_name = self._default_unknown_pkg_mgr
        for pkg in self.pkg_mgrs(collected_facts):
            if os.path.exists(pkg['path']):
                pkg_mgr_name = pkg['name']

        # Handle distro family defaults when more than one package manager is
        # installed or available to the distro, the ansible_fact entry should be
        # the default package manager officially supported by the distro.
        if collected_facts['ansible_os_family'] == "RedHat":
            pkg_mgr_name = self._check_rh_versions()
        elif collected_facts['ansible_os_family'] == 'Debian' and pkg_mgr_name != 'apt':
            # It's possible to install dnf, zypper, rpm, etc inside of
            # Debian. Doing so does not mean the system wants to use them.
            pkg_mgr_name = 'apt'
        elif collected_facts['ansible_os_family'] == 'Altlinux':
            if pkg_mgr_name == 'apt':
                pkg_mgr_name = 'apt_rpm'

        # Check if /usr/bin/apt-get is ordinary (dpkg-based) APT or APT-RPM
        if pkg_mgr_name == 'apt':
            pkg_mgr_name = self._check_apt_flavor(pkg_mgr_name)

        return {'pkg_mgr': pkg_mgr_name}
6,404
Python
.py
112
47.071429
116
0.598437
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
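
_check_rh_versions() decides between the dnf and dnf5 modules by resolving what /usr/bin/dnf actually points at, rather than trusting the path name. A standalone sketch of the same check, assuming the binaries live at their usual Fedora/RHEL paths (default_dnf_flavor is an illustrative helper, not part of the file above):

import os


def default_dnf_flavor():
    # /usr/bin/dnf takes precedence over /usr/bin/microdnf, as in the collector;
    # on dnf5-default systems /usr/bin/dnf is a symlink to /usr/bin/dnf5
    for bin_path in ('/usr/bin/dnf', '/usr/bin/microdnf'):
        if os.path.exists(bin_path):
            return 'dnf5' if os.path.realpath(bin_path) == '/usr/bin/dnf5' else 'dnf'
    return 'unknown'


print(default_dnf_flavor())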
14,274
lsb.py
ansible_ansible/lib/ansible/module_utils/facts/system/lsb.py
# Collect facts related to LSB (Linux Standard Base)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector


class LSBFactCollector(BaseFactCollector):
    name = 'lsb'
    _fact_ids = set()  # type: t.Set[str]
    STRIP_QUOTES = r'\'\"\\'

    def _lsb_release_bin(self, lsb_path, module):
        lsb_facts = {}

        if not lsb_path:
            return lsb_facts

        rc, out, err = module.run_command([lsb_path, "-a"], errors='surrogate_then_replace')
        if rc != 0:
            return lsb_facts

        for line in out.splitlines():
            if len(line) < 1 or ':' not in line:
                continue
            value = line.split(':', 1)[1].strip()

            if 'LSB Version:' in line:
                lsb_facts['release'] = value
            elif 'Distributor ID:' in line:
                lsb_facts['id'] = value
            elif 'Description:' in line:
                lsb_facts['description'] = value
            elif 'Release:' in line:
                lsb_facts['release'] = value
            elif 'Codename:' in line:
                lsb_facts['codename'] = value

        return lsb_facts

    def _lsb_release_file(self, etc_lsb_release_location):
        lsb_facts = {}

        if not os.path.exists(etc_lsb_release_location):
            return lsb_facts

        for line in get_file_lines(etc_lsb_release_location):
            value = line.split('=', 1)[1].strip()

            if 'DISTRIB_ID' in line:
                lsb_facts['id'] = value
            elif 'DISTRIB_RELEASE' in line:
                lsb_facts['release'] = value
            elif 'DISTRIB_DESCRIPTION' in line:
                lsb_facts['description'] = value
            elif 'DISTRIB_CODENAME' in line:
                lsb_facts['codename'] = value

        return lsb_facts

    def collect(self, module=None, collected_facts=None):
        facts_dict = {}
        lsb_facts = {}

        if not module:
            return facts_dict

        lsb_path = module.get_bin_path('lsb_release')

        # try the 'lsb_release' script first
        if lsb_path:
            lsb_facts = self._lsb_release_bin(lsb_path, module=module)

        # no lsb_release, try looking in /etc/lsb-release
        if not lsb_facts:
            lsb_facts = self._lsb_release_file('/etc/lsb-release')

        if lsb_facts and 'release' in lsb_facts:
            lsb_facts['major_release'] = lsb_facts['release'].split('.')[0]

        for k, v in lsb_facts.items():
            if v:
                lsb_facts[k] = v.strip(LSBFactCollector.STRIP_QUOTES)

        facts_dict['lsb'] = lsb_facts
        return facts_dict
3,483
Python
.py
82
33.097561
92
0.603377
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,275
platform.py
ansible_ansible/lib/ansible/module_utils/facts/system/platform.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import re
import socket
import platform

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector

# i86pc is a Solaris and derivatives-ism
SOLARIS_I86_RE_PATTERN = r'i([3456]86|86pc)'
solaris_i86_re = re.compile(SOLARIS_I86_RE_PATTERN)


class PlatformFactCollector(BaseFactCollector):
    name = 'platform'
    _fact_ids = set(['system',
                     'kernel',
                     'kernel_version',
                     'machine',
                     'python_version',
                     'architecture',
                     'machine_id'])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        platform_facts = {}
        # platform.system() can be Linux, Darwin, Java, or Windows
        platform_facts['system'] = platform.system()
        platform_facts['kernel'] = platform.release()
        platform_facts['kernel_version'] = platform.version()
        platform_facts['machine'] = platform.machine()

        platform_facts['python_version'] = platform.python_version()

        platform_facts['fqdn'] = socket.getfqdn()
        platform_facts['hostname'] = platform.node().split('.')[0]
        platform_facts['nodename'] = platform.node()

        platform_facts['domain'] = '.'.join(platform_facts['fqdn'].split('.')[1:])

        arch_bits = platform.architecture()[0]

        platform_facts['userspace_bits'] = arch_bits.replace('bit', '')
        if platform_facts['machine'] == 'x86_64':
            platform_facts['architecture'] = platform_facts['machine']
            if platform_facts['userspace_bits'] == '64':
                platform_facts['userspace_architecture'] = 'x86_64'
            elif platform_facts['userspace_bits'] == '32':
                platform_facts['userspace_architecture'] = 'i386'
        elif solaris_i86_re.search(platform_facts['machine']):
            platform_facts['architecture'] = 'i386'
            if platform_facts['userspace_bits'] == '64':
                platform_facts['userspace_architecture'] = 'x86_64'
            elif platform_facts['userspace_bits'] == '32':
                platform_facts['userspace_architecture'] = 'i386'
        else:
            platform_facts['architecture'] = platform_facts['machine']

        if platform_facts['system'] == 'AIX':
            # Attempt to use getconf to figure out architecture
            # fall back to bootinfo if needed
            getconf_bin = module.get_bin_path('getconf')
            if getconf_bin:
                rc, out, err = module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
                data = out.splitlines()
                platform_facts['architecture'] = data[0]
            else:
                bootinfo_bin = module.get_bin_path('bootinfo')
                rc, out, err = module.run_command([bootinfo_bin, '-p'])
                data = out.splitlines()
                platform_facts['architecture'] = data[0]
        elif platform_facts['system'] == 'OpenBSD':
            platform_facts['architecture'] = platform.uname()[5]

        machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
        if machine_id:
            machine_id = machine_id.splitlines()[0]
            platform_facts["machine_id"] = machine_id

        return platform_facts
4,090
Python
.py
81
41.111111
104
0.631263
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,276
env.py
ansible_ansible/lib/ansible/module_utils/facts/system/env.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os

import ansible.module_utils.compat.typing as t

from ansible.module_utils.six import iteritems

from ansible.module_utils.facts.collector import BaseFactCollector


class EnvFactCollector(BaseFactCollector):
    name = 'env'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        env_facts = {}
        env_facts['env'] = {}

        for k, v in iteritems(os.environ):
            env_facts['env'][k] = v

        return env_facts
1,185
Python
.py
28
38.964286
70
0.740192
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,277
chroot.py
ansible_ansible/lib/ansible/module_utils/facts/system/chroot.py
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations

import os

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector


def is_chroot(module=None):

    is_chroot = None

    if os.environ.get('debian_chroot', False):
        is_chroot = True
    else:
        my_root = os.stat('/')
        try:
            # check if my file system is the root one
            proc_root = os.stat('/proc/1/root/.')
            is_chroot = my_root.st_ino != proc_root.st_ino or my_root.st_dev != proc_root.st_dev
        except Exception:
            # I'm not root or no proc, fallback to checking it is inode #2
            fs_root_ino = 2

            if module is not None:
                stat_path = module.get_bin_path('stat')
                if stat_path:
                    cmd = [stat_path, '-f', '--format=%T', '/']
                    rc, out, err = module.run_command(cmd)
                    if 'btrfs' in out:
                        fs_root_ino = 256
                    elif 'xfs' in out:
                        fs_root_ino = 128

            is_chroot = (my_root.st_ino != fs_root_ino)

    return is_chroot


class ChrootFactCollector(BaseFactCollector):
    name = 'chroot'
    _fact_ids = set(['is_chroot'])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        return {'is_chroot': is_chroot(module)}
1,517
Python
.py
35
33.057143
96
0.576583
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,278
selinux.py
ansible_ansible/lib/ansible/module_utils/facts/system/selinux.py
# Collect facts related to selinux
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector

try:
    from ansible.module_utils.compat import selinux
    HAVE_SELINUX = True
except ImportError:
    HAVE_SELINUX = False

SELINUX_MODE_DICT = {
    1: 'enforcing',
    0: 'permissive',
    -1: 'disabled'
}


class SelinuxFactCollector(BaseFactCollector):
    name = 'selinux'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        facts_dict = {}
        selinux_facts = {}

        # If selinux library is missing, only set the status and selinux_python_present since
        # there is no way to tell if SELinux is enabled or disabled on the system
        # without the library.
        if not HAVE_SELINUX:
            selinux_facts['status'] = 'Missing selinux Python library'
            facts_dict['selinux'] = selinux_facts
            facts_dict['selinux_python_present'] = False
            return facts_dict

        # Set a boolean for testing whether the Python library is present
        facts_dict['selinux_python_present'] = True

        if not selinux.is_selinux_enabled():
            selinux_facts['status'] = 'disabled'
        else:
            selinux_facts['status'] = 'enabled'

            try:
                selinux_facts['policyvers'] = selinux.security_policyvers()
            except (AttributeError, OSError):
                selinux_facts['policyvers'] = 'unknown'

            try:
                (rc, configmode) = selinux.selinux_getenforcemode()
                if rc == 0:
                    selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(configmode, 'unknown')
                else:
                    selinux_facts['config_mode'] = 'unknown'
            except (AttributeError, OSError):
                selinux_facts['config_mode'] = 'unknown'

            try:
                mode = selinux.security_getenforce()
                selinux_facts['mode'] = SELINUX_MODE_DICT.get(mode, 'unknown')
            except (AttributeError, OSError):
                selinux_facts['mode'] = 'unknown'

            try:
                (rc, policytype) = selinux.selinux_getpolicytype()
                if rc == 0:
                    selinux_facts['type'] = policytype
                else:
                    selinux_facts['type'] = 'unknown'
            except (AttributeError, OSError):
                selinux_facts['type'] = 'unknown'

        facts_dict['selinux'] = selinux_facts
        return facts_dict
3,255
Python
.py
76
33.881579
95
0.630414
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,279
date_time.py
ansible_ansible/lib/ansible/module_utils/facts/system/date_time.py
# Date and time related facts collection for ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import datetime
import time

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector
from ansible.module_utils.compat.datetime import utcfromtimestamp


class DateTimeFactCollector(BaseFactCollector):
    name = 'date_time'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        facts_dict = {}
        date_time_facts = {}

        # Store the timestamp once, then get local and UTC versions from that
        epoch_ts = time.time()
        now = datetime.datetime.fromtimestamp(epoch_ts)
        utcnow = utcfromtimestamp(epoch_ts).replace(tzinfo=None)

        date_time_facts['year'] = now.strftime('%Y')
        date_time_facts['month'] = now.strftime('%m')
        date_time_facts['weekday'] = now.strftime('%A')
        date_time_facts['weekday_number'] = now.strftime('%w')
        date_time_facts['weeknumber'] = now.strftime('%W')
        date_time_facts['day'] = now.strftime('%d')
        date_time_facts['hour'] = now.strftime('%H')
        date_time_facts['minute'] = now.strftime('%M')
        date_time_facts['second'] = now.strftime('%S')
        date_time_facts['epoch'] = now.strftime('%s')
        # epoch returns float or string in some non-linux environments
        if date_time_facts['epoch'] == '' or date_time_facts['epoch'][0] == '%':
            date_time_facts['epoch'] = str(int(epoch_ts))
        # epoch_int always returns integer format of epoch
        date_time_facts['epoch_int'] = str(int(now.strftime('%s')))
        if date_time_facts['epoch_int'] == '' or date_time_facts['epoch_int'][0] == '%':
            date_time_facts['epoch_int'] = str(int(epoch_ts))
        date_time_facts['date'] = now.strftime('%Y-%m-%d')
        date_time_facts['time'] = now.strftime('%H:%M:%S')
        date_time_facts['iso8601_micro'] = utcnow.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        date_time_facts['iso8601'] = utcnow.strftime("%Y-%m-%dT%H:%M:%SZ")
        date_time_facts['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
        date_time_facts['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
        date_time_facts['tz'] = time.strftime("%Z")
        date_time_facts['tz_dst'] = time.tzname[1]
        date_time_facts['tz_offset'] = time.strftime("%z")

        facts_dict['date_time'] = date_time_facts
        return facts_dict
3,125
Python
.py
60
46.066667
88
0.660668
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,280
caps.py
ansible_ansible/lib/ansible/module_utils/facts/system/caps.py
# Collect facts related to systems 'capabilities' via capsh
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector


class SystemCapabilitiesFactCollector(BaseFactCollector):
    name = 'caps'
    _fact_ids = set(['system_capabilities',
                     'system_capabilities_enforced'])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        rc = -1
        facts_dict = {'system_capabilities_enforced': 'N/A',
                      'system_capabilities': 'N/A'}
        if module:
            capsh_path = module.get_bin_path('capsh')
            if capsh_path:
                # NOTE: -> get_caps_data()/parse_caps_data() for easier mocking -akl
                try:
                    rc, out, err = module.run_command([capsh_path, "--print"], errors='surrogate_then_replace', handle_exceptions=False)
                except (IOError, OSError) as e:
                    module.warn('Could not query system capabilities: %s' % str(e))

                if rc == 0:
                    enforced_caps = []
                    enforced = 'NA'
                    for line in out.splitlines():
                        if len(line) < 1:
                            continue
                        if line.startswith('Current:'):
                            if line.split(':')[1].strip() == '=ep':
                                enforced = 'False'
                            else:
                                enforced = 'True'
                                enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]

                    facts_dict['system_capabilities_enforced'] = enforced
                    facts_dict['system_capabilities'] = enforced_caps

        return facts_dict
2,433
Python
.py
50
37.72
136
0.600506
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,281
fips.py
ansible_ansible/lib/ansible/module_utils/facts/system/fips.py
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Determine if a system is in 'fips' mode

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector


class FipsFactCollector(BaseFactCollector):
    name = 'fips'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        # NOTE: this is populated even if it is not set
        fips_facts = {
            'fips': False
        }
        if get_file_content('/proc/sys/crypto/fips_enabled') == '1':
            fips_facts['fips'] = True
        return fips_facts
812
Python
.py
18
39.5
92
0.700127
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,282
apparmor.py
ansible_ansible/lib/ansible/module_utils/facts/system/apparmor.py
# Collect facts related to apparmor
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector


class ApparmorFactCollector(BaseFactCollector):
    name = 'apparmor'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        facts_dict = {}
        apparmor_facts = {}
        if os.path.exists('/sys/kernel/security/apparmor'):
            apparmor_facts['status'] = 'enabled'
        else:
            apparmor_facts['status'] = 'disabled'

        facts_dict['apparmor'] = apparmor_facts
        return facts_dict
1,326
Python
.py
32
37.5625
70
0.730949
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,283
ssh_pub_keys.py
ansible_ansible/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector


class SshPubKeyFactCollector(BaseFactCollector):
    name = 'ssh_pub_keys'
    _fact_ids = set(['ssh_host_pub_keys',
                     'ssh_host_key_dsa_public',
                     'ssh_host_key_rsa_public',
                     'ssh_host_key_ecdsa_public',
                     'ssh_host_key_ed25519_public'])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        ssh_pub_key_facts = {}
        algos = ('dsa', 'rsa', 'ecdsa', 'ed25519')

        # list of directories to check for ssh keys
        # used in the order listed here, the first one with keys is used
        keydirs = ['/etc/ssh', '/etc/openssh', '/etc']

        for keydir in keydirs:
            for algo in algos:
                factname = 'ssh_host_key_%s_public' % algo
                if factname in ssh_pub_key_facts:
                    # a previous keydir was already successful, stop looking
                    # for keys
                    return ssh_pub_key_facts
                key_filename = '%s/ssh_host_%s_key.pub' % (keydir, algo)
                keydata = get_file_content(key_filename)
                if keydata is not None:
                    (keytype, key) = keydata.split()[0:2]
                    ssh_pub_key_facts[factname] = key
                    ssh_pub_key_facts[factname + '_keytype'] = keytype

        return ssh_pub_key_facts
2,239
Python
.py
45
40.444444
76
0.635531
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
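The key files the collector scans for follow a fixed naming scheme per directory and algorithm. A compact standalone equivalent of that scan (the paths mirror the collector; the function name and first-directory-wins shortcut are illustrative):

import os

def host_pub_keys(keydirs=('/etc/ssh', '/etc/openssh', '/etc'),
                  algos=('dsa', 'rsa', 'ecdsa', 'ed25519')):
    found = {}
    for keydir in keydirs:
        for algo in algos:
            path = os.path.join(keydir, 'ssh_host_%s_key.pub' % algo)
            try:
                with open(path) as f:
                    keytype, key = f.read().split()[0:2]
            except (OSError, ValueError):
                continue
            found['ssh_host_key_%s_public' % algo] = key
            found['ssh_host_key_%s_public_keytype' % algo] = keytype
        if found:
            # first directory that yields any keys wins, as in the collector
            break
    return found

print(host_pub_keys())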
14,284
systemd.py
ansible_ansible/lib/ansible/module_utils/facts/system/systemd.py
# Get systemd version and features
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector


class SystemdFactCollector(BaseFactCollector):
    name = "systemd"
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        systemctl_bin = module.get_bin_path("systemctl")
        systemd_facts = {}
        if systemctl_bin and ServiceMgrFactCollector.is_systemd_managed(module):
            rc, stdout, dummy = module.run_command(
                [systemctl_bin, "--version"],
                check_rc=False,
            )

            if rc != 0:
                return systemd_facts

            systemd_facts["systemd"] = {
                "features": str(stdout.split("\n")[1]),
                "version": int(stdout.split(" ")[1]),
            }

        return systemd_facts
1,661
Python
.py
38
37.631579
81
0.69083
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
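The split indices above rely on the layout of `systemctl --version` output: version on the first line, feature flags on the second. A small illustrative parse against sample output (the sample string is an assumption, not captured from a real host):

sample = (
    "systemd 252 (252.22-1~deb12u1)\n"
    "+PAM +AUDIT +SELINUX -APPARMOR +SECCOMP default-hierarchy=unified"
)

version = int(sample.split(" ")[1])   # second whitespace token of line one -> 252
features = sample.split("\n")[1]      # the entire second line
print(version, features)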
14,285
dns.py
ansible_ansible/lib/ansible/module_utils/facts/system/dns.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector


class DnsFactCollector(BaseFactCollector):
    name = 'dns'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        dns_facts = {}

        # TODO: flatten
        dns_facts['dns'] = {}

        for line in get_file_content('/etc/resolv.conf', '').splitlines():
            if line.startswith('#') or line.startswith(';') or line.strip() == '':
                continue
            tokens = line.split()
            if len(tokens) == 0:
                continue
            if tokens[0] == 'nameserver':
                if 'nameservers' not in dns_facts['dns']:
                    dns_facts['dns']['nameservers'] = []
                for nameserver in tokens[1:]:
                    dns_facts['dns']['nameservers'].append(nameserver)
            elif tokens[0] == 'domain':
                if len(tokens) > 1:
                    dns_facts['dns']['domain'] = tokens[1]
            elif tokens[0] == 'search':
                dns_facts['dns']['search'] = []
                for suffix in tokens[1:]:
                    dns_facts['dns']['search'].append(suffix)
            elif tokens[0] == 'sortlist':
                dns_facts['dns']['sortlist'] = []
                for address in tokens[1:]:
                    dns_facts['dns']['sortlist'].append(address)
            elif tokens[0] == 'options':
                dns_facts['dns']['options'] = {}
                if len(tokens) > 1:
                    for option in tokens[1:]:
                        option_tokens = option.split(':', 1)
                        if len(option_tokens) == 0:
                            continue
                        val = len(option_tokens) == 2 and option_tokens[1] or True
                        dns_facts['dns']['options'][option_tokens[0]] = val

        return dns_facts
2,692
Python
.py
57
36.087719
82
0.57219
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
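The same parsing rules applied to a sample resolv.conf, as a quick standalone check (the sample content is invented for illustration):

sample = """nameserver 192.0.2.1
nameserver 192.0.2.2
search example.com example.net
options timeout:2 rotate
"""

dns = {}
for line in sample.splitlines():
    if line.startswith(('#', ';')) or not line.strip():
        continue
    tokens = line.split()
    if tokens[0] == 'nameserver':
        dns.setdefault('nameservers', []).extend(tokens[1:])
    elif tokens[0] == 'search':
        dns['search'] = tokens[1:]
    elif tokens[0] == 'options':
        dns['options'] = {}
        for option in tokens[1:]:
            key, sep, val = option.partition(':')
            # 'timeout:2' -> '2'; a bare flag like 'rotate' -> True
            dns['options'][key] = val if sep else True

print(dns)
# {'nameservers': ['192.0.2.1', '192.0.2.2'],
#  'search': ['example.com', 'example.net'],
#  'options': {'timeout': '2', 'rotate': True}}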
14,286
linux.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/linux.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import glob
import os
import re

from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines


class LinuxVirtual(Virtual):
    """
    This is a Linux-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'Linux'

    # For more information, check: http://people.redhat.com/~rjones/virt-what/
    def get_virtual_facts(self):
        virtual_facts = {}

        # We want to maintain compatibility with the old "virtualization_type"
        # and "virtualization_role" entries, so we need to track if we found
        # them. We won't return them until the end, but if we found them early,
        # we should avoid updating them again.
        found_virt = False

        # But as we go along, we also want to track virt tech the new way.
        host_tech = set()
        guest_tech = set()

        # lxc/docker
        if os.path.exists('/proc/1/cgroup'):
            for line in get_file_lines('/proc/1/cgroup'):
                if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
                    guest_tech.add('docker')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'docker'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
                    guest_tech.add('lxc')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'lxc'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if re.search('/system.slice/containerd.service', line):
                    guest_tech.add('containerd')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'containerd'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True

        # lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
        if os.path.exists('/proc/1/environ'):
            for line in get_file_lines('/proc/1/environ', line_sep='\x00'):
                if re.search('container=lxc', line):
                    guest_tech.add('lxc')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'lxc'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if re.search('container=podman', line):
                    guest_tech.add('podman')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'podman'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if re.search('^container=.', line):
                    guest_tech.add('container')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'container'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True

        if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'):
            virtual_facts['virtualization_type'] = 'openvz'
            if os.path.exists('/proc/bc'):
                host_tech.add('openvz')
                if not found_virt:
                    virtual_facts['virtualization_role'] = 'host'
            else:
                guest_tech.add('openvz')
                if not found_virt:
                    virtual_facts['virtualization_role'] = 'guest'
            found_virt = True

        systemd_container = get_file_content('/run/systemd/container')
        if systemd_container:
            guest_tech.add(systemd_container)
            if not found_virt:
                virtual_facts['virtualization_type'] = systemd_container
                virtual_facts['virtualization_role'] = 'guest'
                found_virt = True

        # If docker/containerd has a custom cgroup parent, checking /proc/1/cgroup (above) might fail.
        # https://docs.docker.com/engine/reference/commandline/dockerd/#default-cgroup-parent
        # Fallback to more rudimentary checks.
        if os.path.exists('/.dockerenv') or os.path.exists('/.dockerinit'):
            guest_tech.add('docker')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'docker'
                virtual_facts['virtualization_role'] = 'guest'
                found_virt = True

        # ensure 'container' guest_tech is appropriately set
        if guest_tech.intersection(set(['docker', 'lxc', 'podman', 'openvz', 'containerd'])) or systemd_container:
            guest_tech.add('container')

        if os.path.exists("/proc/xen"):
            is_xen_host = False
            try:
                for line in get_file_lines('/proc/xen/capabilities'):
                    if "control_d" in line:
                        is_xen_host = True
            except IOError:
                pass

            if is_xen_host:
                host_tech.add('xen')
                if not found_virt:
                    virtual_facts['virtualization_type'] = 'xen'
                    virtual_facts['virtualization_role'] = 'host'
            else:
                if not found_virt:
                    virtual_facts['virtualization_type'] = 'xen'
                    virtual_facts['virtualization_role'] = 'guest'
            found_virt = True

        # assume guest for this block
        if not found_virt:
            virtual_facts['virtualization_role'] = 'guest'

        product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
        sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
        product_family = get_file_content('/sys/devices/virtual/dmi/id/product_family')

        if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'):
            guest_tech.add('kvm')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'kvm'
                found_virt = True

        if sys_vendor == 'oVirt':
            guest_tech.add('oVirt')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'oVirt'
                found_virt = True

        if sys_vendor == 'Red Hat':
            if product_family == 'RHV':
                guest_tech.add('RHV')
                if not found_virt:
                    virtual_facts['virtualization_type'] = 'RHV'
                    found_virt = True
            elif product_name == 'RHEV Hypervisor':
                guest_tech.add('RHEV')
                if not found_virt:
                    virtual_facts['virtualization_type'] = 'RHEV'
                    found_virt = True

        if product_name and product_name.startswith(("VMware",)):
            guest_tech.add('VMware')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'VMware'
                found_virt = True

        if product_name in ('OpenStack Compute', 'OpenStack Nova'):
            guest_tech.add('openstack')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'openstack'
                found_virt = True

        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')

        if bios_vendor == 'Xen':
            guest_tech.add('xen')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'xen'
                found_virt = True

        if bios_vendor == 'innotek GmbH':
            guest_tech.add('virtualbox')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'virtualbox'
                found_virt = True

        if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'):
            guest_tech.add('kvm')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'kvm'
                found_virt = True

        KVM_SYS_VENDORS = ('QEMU', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix')
        if sys_vendor in KVM_SYS_VENDORS:
            guest_tech.add('kvm')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'kvm'
                found_virt = True

        if sys_vendor == 'KubeVirt':
            guest_tech.add('KubeVirt')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'KubeVirt'
                found_virt = True

        # FIXME: This does also match hyperv
        if sys_vendor == 'Microsoft Corporation':
            guest_tech.add('VirtualPC')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'VirtualPC'
                found_virt = True

        if sys_vendor == 'Parallels Software International Inc.':
            guest_tech.add('parallels')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'parallels'
                found_virt = True

        if sys_vendor == 'OpenStack Foundation':
            guest_tech.add('openstack')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'openstack'
                found_virt = True

        # unassume guest
        if not found_virt:
            del virtual_facts['virtualization_role']

        if os.path.exists('/proc/self/status'):
            for line in get_file_lines('/proc/self/status'):
                if re.match(r'^VxID:\s+\d+', line):
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'linux_vserver'
                    if re.match(r'^VxID:\s+0', line):
                        host_tech.add('linux_vserver')
                        if not found_virt:
                            virtual_facts['virtualization_role'] = 'host'
                    else:
                        guest_tech.add('linux_vserver')
                        if not found_virt:
                            virtual_facts['virtualization_role'] = 'guest'
                    found_virt = True

        if os.path.exists('/proc/cpuinfo'):
            for line in get_file_lines('/proc/cpuinfo'):
                if re.match('^model name.*QEMU Virtual CPU', line):
                    guest_tech.add('kvm')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'kvm'
                elif re.match('^vendor_id.*User Mode Linux', line):
                    guest_tech.add('uml')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'uml'
                elif re.match('^model name.*UML', line):
                    guest_tech.add('uml')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'uml'
                elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line):
                    guest_tech.add('kvm')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'kvm'
                elif re.match('^vendor_id.*PowerVM Lx86', line):
                    guest_tech.add('powervm_lx86')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'powervm_lx86'
                elif re.match('^vendor_id.*IBM/S390', line):
                    guest_tech.add('PR/SM')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'PR/SM'
                    lscpu = self.module.get_bin_path('lscpu')
                    if lscpu:
                        rc, out, err = self.module.run_command(["lscpu"])
                        if rc == 0:
                            for line in out.splitlines():
                                data = line.split(":", 1)
                                key = data[0].strip()
                                if key == 'Hypervisor':
                                    tech = data[1].strip()
                                    guest_tech.add(tech)
                                    if not found_virt:
                                        virtual_facts['virtualization_type'] = tech
                    else:
                        guest_tech.add('ibm_systemz')
                        if not found_virt:
                            virtual_facts['virtualization_type'] = 'ibm_systemz'
                else:
                    continue

                if virtual_facts['virtualization_type'] == 'PR/SM':
                    if not found_virt:
                        virtual_facts['virtualization_role'] = 'LPAR'
                else:
                    if not found_virt:
                        virtual_facts['virtualization_role'] = 'guest'
                if not found_virt:
                    found_virt = True

        # Beware that we can have both kvm and virtualbox running on a single system
        if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
            modules = []
            for line in get_file_lines("/proc/modules"):
                data = line.split(" ", 1)
                modules.append(data[0])

            if 'kvm' in modules:
                host_tech.add('kvm')
                if not found_virt:
                    virtual_facts['virtualization_type'] = 'kvm'
                    virtual_facts['virtualization_role'] = 'host'

                if os.path.isdir('/rhev/'):
                    # Check whether this is a RHEV hypervisor (is vdsm running ?)
                    for f in glob.glob('/proc/[0-9]*/comm'):
                        try:
                            with open(f) as virt_fh:
                                comm_content = virt_fh.read().rstrip()

                            if comm_content in ('vdsm', 'vdsmd'):
                                # We add both kvm and RHEV to host_tech in this case.
                                # It's accurate. RHEV uses KVM.
                                host_tech.add('RHEV')
                                if not found_virt:
                                    virtual_facts['virtualization_type'] = 'RHEV'
                                break
                        except Exception:
                            pass

                found_virt = True

            if 'vboxdrv' in modules:
                host_tech.add('virtualbox')
                if not found_virt:
                    virtual_facts['virtualization_type'] = 'virtualbox'
                    virtual_facts['virtualization_role'] = 'host'
                    found_virt = True

            if 'virtio' in modules:
                host_tech.add('kvm')
                if not found_virt:
                    virtual_facts['virtualization_type'] = 'kvm'
                    virtual_facts['virtualization_role'] = 'guest'
                    found_virt = True

        # In older Linux Kernel versions, /sys filesystem is not available
        # dmidecode is the safest option to parse virtualization related values
        dmi_bin = self.module.get_bin_path('dmidecode')
        # We still want to continue even if dmidecode is not available
        if dmi_bin is not None:
            (rc, out, err) = self.module.run_command('%s -s system-product-name' % dmi_bin)
            if rc == 0:
                # Strip out commented lines (specific dmidecode output)
                vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')])
                if vendor_name.startswith('VMware'):
                    guest_tech.add('VMware')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'VMware'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True

                if 'BHYVE' in out:
                    guest_tech.add('bhyve')
                    if not found_virt:
                        virtual_facts['virtualization_type'] = 'bhyve'
                        virtual_facts['virtualization_role'] = 'guest'
                        found_virt = True

        if os.path.exists('/dev/kvm'):
            host_tech.add('kvm')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'kvm'
                virtual_facts['virtualization_role'] = 'host'
                found_virt = True

        # If none of the above matches, return 'NA' for virtualization_type
        # and virtualization_role. This allows for proper grouping.
        if not found_virt:
            virtual_facts['virtualization_type'] = 'NA'
            virtual_facts['virtualization_role'] = 'NA'
            found_virt = True

        virtual_facts['virtualization_tech_guest'] = guest_tech
        virtual_facts['virtualization_tech_host'] = host_tech
        return virtual_facts


class LinuxVirtualCollector(VirtualCollector):
    _fact_class = LinuxVirtual
    _platform = 'Linux'
17,822
Python
.py
355
33.853521
117
0.519233
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,287
sunos.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/sunos.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os

from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector


class SunOSVirtual(Virtual):
    """
    This is a SunOS-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    - container
    """
    platform = 'SunOS'

    def get_virtual_facts(self):
        virtual_facts = {}
        host_tech = set()
        guest_tech = set()

        # Check if it's a zone
        zonename = self.module.get_bin_path('zonename')
        if zonename:
            rc, out, err = self.module.run_command(zonename)
            if rc == 0:
                if out.rstrip() == "global":
                    host_tech.add('zone')
                else:
                    guest_tech.add('zone')
                    virtual_facts['container'] = 'zone'

        # Check if it's a branded zone (i.e. Solaris 8/9 zone)
        if os.path.isdir('/.SUNWnative'):
            guest_tech.add('zone')
            virtual_facts['container'] = 'zone'

        # If it's a zone check if we can detect if our global zone is itself virtualized.
        # Relies on the "guest tools" (e.g. vmware tools) to be installed
        if 'container' in virtual_facts and virtual_facts['container'] == 'zone':
            modinfo = self.module.get_bin_path('modinfo')
            if modinfo:
                rc, out, err = self.module.run_command(modinfo)
                if rc == 0:
                    for line in out.splitlines():
                        if 'VMware' in line:
                            guest_tech.add('vmware')
                            virtual_facts['virtualization_type'] = 'vmware'
                            virtual_facts['virtualization_role'] = 'guest'
                        if 'VirtualBox' in line:
                            guest_tech.add('virtualbox')
                            virtual_facts['virtualization_type'] = 'virtualbox'
                            virtual_facts['virtualization_role'] = 'guest'

        if os.path.exists('/proc/vz'):
            guest_tech.add('virtuozzo')
            virtual_facts['virtualization_type'] = 'virtuozzo'
            virtual_facts['virtualization_role'] = 'guest'

        # Detect domaining on Sparc hardware
        virtinfo = self.module.get_bin_path('virtinfo')
        if virtinfo:
            # The output of virtinfo is different whether we are on a machine with logical
            # domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
            rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
            # The output contains multiple lines with different keys like this:
            #   DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
            # The output may also be not formatted and the returncode is set to 0 regardless of the error condition:
            #   virtinfo can only be run from the global zone
            if rc == 0:
                try:
                    for line in out.splitlines():
                        fields = line.split('|')
                        if fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms':
                            guest_tech.add('ldom')
                            virtual_facts['virtualization_type'] = 'ldom'
                            virtual_facts['virtualization_role'] = 'guest'
                            hostfeatures = []
                            for field in fields[2:]:
                                arg = field.split('=')
                                if arg[1] == 'true':
                                    hostfeatures.append(arg[0])
                            if len(hostfeatures) > 0:
                                virtual_facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
                except ValueError:
                    pass
        else:
            smbios = self.module.get_bin_path('smbios')
            if not smbios:
                return
            rc, out, err = self.module.run_command(smbios)
            if rc == 0:
                for line in out.splitlines():
                    if 'VMware' in line:
                        guest_tech.add('vmware')
                        virtual_facts['virtualization_type'] = 'vmware'
                        virtual_facts['virtualization_role'] = 'guest'
                    elif 'Parallels' in line:
                        guest_tech.add('parallels')
                        virtual_facts['virtualization_type'] = 'parallels'
                        virtual_facts['virtualization_role'] = 'guest'
                    elif 'VirtualBox' in line:
                        guest_tech.add('virtualbox')
                        virtual_facts['virtualization_type'] = 'virtualbox'
                        virtual_facts['virtualization_role'] = 'guest'
                    elif 'HVM domU' in line:
                        guest_tech.add('xen')
                        virtual_facts['virtualization_type'] = 'xen'
                        virtual_facts['virtualization_role'] = 'guest'
                    elif 'KVM' in line:
                        guest_tech.add('kvm')
                        virtual_facts['virtualization_type'] = 'kvm'
                        virtual_facts['virtualization_role'] = 'guest'

        virtual_facts['virtualization_tech_guest'] = guest_tech
        virtual_facts['virtualization_tech_host'] = host_tech
        return virtual_facts


class SunOSVirtualCollector(VirtualCollector):
    _fact_class = SunOSVirtual
    _platform = 'SunOS'
6,217
Python
.py
123
35.471545
116
0.542194
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,288
base.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/base.py
# base classes for virtualization facts
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.collector import BaseFactCollector


class Virtual:
    """
    This is a generic Virtual subclass of Facts. This should be further
    subclassed to implement per platform. If you subclass this,
    you should define:
    - virtualization_type
    - virtualization_role
    - container (e.g. solaris zones, freebsd jails, linux containers)

    All subclasses MUST define platform.
    """
    platform = 'Generic'

    # FIXME: remove load_on_init if we can
    def __init__(self, module, load_on_init=False):
        self.module = module

    # FIXME: just here for existing tests cases till they are updated
    def populate(self, collected_facts=None):
        virtual_facts = self.get_virtual_facts()
        return virtual_facts

    def get_virtual_facts(self):
        virtual_facts = {
            'virtualization_type': '',
            'virtualization_role': '',
            'virtualization_tech_guest': set(),
            'virtualization_tech_host': set(),
        }
        return virtual_facts


class VirtualCollector(BaseFactCollector):
    name = 'virtual'
    _fact_class = Virtual
    _fact_ids = set([
        'virtualization_type',
        'virtualization_role',
        'virtualization_tech_guest',
        'virtualization_tech_host',
    ])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        collected_facts = collected_facts or {}
        if not module:
            return {}

        # Network munges cached_facts by side effect, so give it a copy
        facts_obj = self._fact_class(module)

        facts_dict = facts_obj.populate(collected_facts=collected_facts)

        return facts_dict
2,493
Python
.py
63
34.063492
72
0.694698
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
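Assuming the two classes above are importable, the collect() flow can be exercised with a stub standing in for AnsibleModule (FakeModule is purely illustrative; the base class only stores the module reference):

class FakeModule:
    pass

collector = VirtualCollector()
print(collector.collect(module=FakeModule()))
# {'virtualization_type': '', 'virtualization_role': '',
#  'virtualization_tech_guest': set(), 'virtualization_tech_host': set()}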
14,289
freebsd.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/freebsd.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os

from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin


class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
    """
    This is a FreeBSD-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'FreeBSD'

    def get_virtual_facts(self):
        virtual_facts = {}
        host_tech = set()
        guest_tech = set()

        # Set empty values as default
        virtual_facts['virtualization_type'] = ''
        virtual_facts['virtualization_role'] = ''

        if os.path.exists('/dev/xen/xenstore'):
            guest_tech.add('xen')
            virtual_facts['virtualization_type'] = 'xen'
            virtual_facts['virtualization_role'] = 'guest'

        kern_vm_guest = self.detect_virt_product('kern.vm_guest')
        guest_tech.update(kern_vm_guest['virtualization_tech_guest'])
        host_tech.update(kern_vm_guest['virtualization_tech_host'])

        hw_hv_vendor = self.detect_virt_product('hw.hv_vendor')
        guest_tech.update(hw_hv_vendor['virtualization_tech_guest'])
        host_tech.update(hw_hv_vendor['virtualization_tech_host'])

        sec_jail_jailed = self.detect_virt_product('security.jail.jailed')
        guest_tech.update(sec_jail_jailed['virtualization_tech_guest'])
        host_tech.update(sec_jail_jailed['virtualization_tech_host'])

        if virtual_facts['virtualization_type'] == '':
            sysctl = kern_vm_guest or hw_hv_vendor or sec_jail_jailed
            # We call update here, then re-set virtualization_tech_host/guest
            # later.
            virtual_facts.update(sysctl)

        virtual_vendor_facts = self.detect_virt_vendor('hw.model')
        guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
        host_tech.update(virtual_vendor_facts['virtualization_tech_host'])

        if virtual_facts['virtualization_type'] == '':
            virtual_facts.update(virtual_vendor_facts)

        virtual_facts['virtualization_tech_guest'] = guest_tech
        virtual_facts['virtualization_tech_host'] = host_tech
        return virtual_facts


class FreeBSDVirtualCollector(VirtualCollector):
    _fact_class = FreeBSDVirtual
    _platform = 'FreeBSD'
3,018
Python
.py
61
42.885246
81
0.703061
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,290
dragonfly.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/dragonfly.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

from ansible.module_utils.facts.virtual.freebsd import FreeBSDVirtual, VirtualCollector


class DragonFlyVirtualCollector(VirtualCollector):
    # Note the _fact_class impl is actually the FreeBSDVirtual impl
    _fact_class = FreeBSDVirtual
    _platform = 'DragonFly'
959
Python
.py
20
46.15
87
0.789305
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,291
sysctl.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/sysctl.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import re


class VirtualSysctlDetectionMixin(object):
    def detect_sysctl(self):
        self.sysctl_path = self.module.get_bin_path('sysctl')

    def detect_virt_product(self, key):
        virtual_product_facts = {}
        host_tech = set()
        guest_tech = set()

        # We do similar to what we do in linux.py -- We want to allow multiple
        # virt techs to show up, but maintain compatibility, so we have to track
        # when we would have stopped, even though now we go through everything.
        found_virt = False

        self.detect_sysctl()
        if self.sysctl_path:
            rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
            if rc == 0:
                if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
                    guest_tech.add('kvm')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'kvm'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if re.match('.*VMware.*', out):
                    guest_tech.add('VMware')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'VMware'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if out.rstrip() == 'VirtualBox':
                    guest_tech.add('virtualbox')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'virtualbox'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if re.match('(HVM domU|XenPVH|XenPV|XenPVHVM).*', out):
                    guest_tech.add('xen')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'xen'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if out.rstrip() == 'Hyper-V':
                    guest_tech.add('Hyper-V')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'Hyper-V'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if out.rstrip() == 'Parallels':
                    guest_tech.add('parallels')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'parallels'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if out.rstrip() == 'RHEV Hypervisor':
                    guest_tech.add('RHEV')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'RHEV'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True
                if (key == 'security.jail.jailed') and (out.rstrip() == '1'):
                    guest_tech.add('jails')
                    if not found_virt:
                        virtual_product_facts['virtualization_type'] = 'jails'
                        virtual_product_facts['virtualization_role'] = 'guest'
                        found_virt = True

        virtual_product_facts['virtualization_tech_guest'] = guest_tech
        virtual_product_facts['virtualization_tech_host'] = host_tech
        return virtual_product_facts

    def detect_virt_vendor(self, key):
        virtual_vendor_facts = {}
        host_tech = set()
        guest_tech = set()
        self.detect_sysctl()
        if self.sysctl_path:
            rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
            if rc == 0:
                if out.rstrip() == 'QEMU':
                    guest_tech.add('kvm')
                    virtual_vendor_facts['virtualization_type'] = 'kvm'
                    virtual_vendor_facts['virtualization_role'] = 'guest'
                if out.rstrip() == 'OpenBSD':
                    guest_tech.add('vmm')
                    virtual_vendor_facts['virtualization_type'] = 'vmm'
                    virtual_vendor_facts['virtualization_role'] = 'guest'

        virtual_vendor_facts['virtualization_tech_guest'] = guest_tech
        virtual_vendor_facts['virtualization_tech_host'] = host_tech
        return virtual_vendor_facts
5,260
Python
.py
101
37.079208
88
0.548262
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
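A condensed, standalone version of the product-string matching above, useful for eyeballing which sysctl outputs map to which tech (the sample strings are assumptions):

import re

def classify(out):
    if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
        return 'kvm'
    if re.match('.*VMware.*', out):
        return 'VMware'
    if out.rstrip() == 'VirtualBox':
        return 'virtualbox'
    if re.match('(HVM domU|XenPVH|XenPV|XenPVHVM).*', out):
        return 'xen'
    return None

for sample in ('KVM', 'VMware, Inc.', 'VirtualBox\n', 'GenuineIntel'):
    print(repr(sample), '->', classify(sample))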
14,292
netbsd.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/netbsd.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os

from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin


class NetBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
    platform = 'NetBSD'

    def get_virtual_facts(self):
        virtual_facts = {}
        host_tech = set()
        guest_tech = set()

        # Set empty values as default
        virtual_facts['virtualization_type'] = ''
        virtual_facts['virtualization_role'] = ''

        virtual_product_facts = self.detect_virt_product('machdep.dmi.system-product')
        guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
        host_tech.update(virtual_product_facts['virtualization_tech_host'])
        virtual_facts.update(virtual_product_facts)

        virtual_vendor_facts = self.detect_virt_vendor('machdep.dmi.system-vendor')
        guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
        host_tech.update(virtual_vendor_facts['virtualization_tech_host'])

        if virtual_facts['virtualization_type'] == '':
            virtual_facts.update(virtual_vendor_facts)

        # The above logic is tried first for backwards compatibility. If
        # something above matches, use it. Otherwise if the result is still
        # empty, try machdep.hypervisor.
        virtual_vendor_facts = self.detect_virt_vendor('machdep.hypervisor')
        guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
        host_tech.update(virtual_vendor_facts['virtualization_tech_host'])

        if virtual_facts['virtualization_type'] == '':
            virtual_facts.update(virtual_vendor_facts)

        if os.path.exists('/dev/xencons'):
            guest_tech.add('xen')
            if virtual_facts['virtualization_type'] == '':
                virtual_facts['virtualization_type'] = 'xen'
                virtual_facts['virtualization_role'] = 'guest'

        virtual_facts['virtualization_tech_guest'] = guest_tech
        virtual_facts['virtualization_tech_host'] = host_tech
        return virtual_facts


class NetBSDVirtualCollector(VirtualCollector):
    _fact_class = NetBSDVirtual
    _platform = 'NetBSD'
2,896
Python
.py
55
45.963636
86
0.712819
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,293
hpux.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/hpux.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os
import re

from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector


class HPUXVirtual(Virtual):
    """
    This is a HP-UX specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'HP-UX'

    def get_virtual_facts(self):
        virtual_facts = {}
        host_tech = set()
        guest_tech = set()

        if os.path.exists('/usr/sbin/vecheck'):
            rc, out, err = self.module.run_command("/usr/sbin/vecheck")
            if rc == 0:
                guest_tech.add('HP vPar')
                virtual_facts['virtualization_type'] = 'guest'
                virtual_facts['virtualization_role'] = 'HP vPar'
        if os.path.exists('/opt/hpvm/bin/hpvminfo'):
            rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
            if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
                guest_tech.add('HPVM vPar')
                virtual_facts['virtualization_type'] = 'guest'
                virtual_facts['virtualization_role'] = 'HPVM vPar'
            elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
                guest_tech.add('HPVM IVM')
                virtual_facts['virtualization_type'] = 'guest'
                virtual_facts['virtualization_role'] = 'HPVM IVM'
            elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
                guest_tech.add('HPVM')
                virtual_facts['virtualization_type'] = 'host'
                virtual_facts['virtualization_role'] = 'HPVM'
        if os.path.exists('/usr/sbin/parstatus'):
            rc, out, err = self.module.run_command("/usr/sbin/parstatus")
            if rc == 0:
                guest_tech.add('HP nPar')
                virtual_facts['virtualization_type'] = 'guest'
                virtual_facts['virtualization_role'] = 'HP nPar'

        virtual_facts['virtualization_tech_guest'] = guest_tech
        virtual_facts['virtualization_tech_host'] = host_tech
        return virtual_facts


class HPUXVirtualCollector(VirtualCollector):
    _fact_class = HPUXVirtual
    _platform = 'HP-UX'
2,823
Python
.py
61
37.836066
77
0.631904
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,294
openbsd.py
ansible_ansible/lib/ansible/module_utils/facts/virtual/openbsd.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import re

from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
from ansible.module_utils.facts.utils import get_file_content


class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
    """
    This is a OpenBSD-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'OpenBSD'
    DMESG_BOOT = '/var/run/dmesg.boot'

    def get_virtual_facts(self):
        virtual_facts = {}
        host_tech = set()
        guest_tech = set()

        # Set empty values as default
        virtual_facts['virtualization_type'] = ''
        virtual_facts['virtualization_role'] = ''

        virtual_product_facts = self.detect_virt_product('hw.product')
        guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
        host_tech.update(virtual_product_facts['virtualization_tech_host'])
        virtual_facts.update(virtual_product_facts)

        virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
        guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
        host_tech.update(virtual_vendor_facts['virtualization_tech_host'])

        if virtual_facts['virtualization_type'] == '':
            virtual_facts.update(virtual_vendor_facts)

        # Check the dmesg if vmm(4) attached, indicating the host is
        # capable of virtualization.
        dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT)
        for line in dmesg_boot.splitlines():
            match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
            if match:
                host_tech.add('vmm')
                virtual_facts['virtualization_type'] = 'vmm'
                virtual_facts['virtualization_role'] = 'host'

        virtual_facts['virtualization_tech_guest'] = guest_tech
        virtual_facts['virtualization_tech_host'] = host_tech
        return virtual_facts


class OpenBSDVirtualCollector(VirtualCollector):
    _fact_class = OpenBSDVirtual
    _platform = 'OpenBSD'
2,785
Python
.py
58
41.724138
81
0.708333
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,295
facter.py
ansible_ansible/lib/ansible/module_utils/facts/other/facter.py
# Copyright (c) 2023 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import annotations

import json

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector


class FacterFactCollector(BaseFactCollector):
    name = 'facter'
    _fact_ids = set(['facter'])  # type: t.Set[str]

    def __init__(self, collectors=None, namespace=None):
        namespace = PrefixFactNamespace(namespace_name='facter',
                                        prefix='facter_')
        super(FacterFactCollector, self).__init__(collectors=collectors,
                                                  namespace=namespace)

    def find_facter(self, module):
        facter_path = module.get_bin_path(
            'facter',
            opt_dirs=['/opt/puppetlabs/bin']
        )
        cfacter_path = module.get_bin_path(
            'cfacter',
            opt_dirs=['/opt/puppetlabs/bin']
        )

        # Prefer to use cfacter if available
        if cfacter_path is not None:
            facter_path = cfacter_path

        return facter_path

    def run_facter(self, module, facter_path):
        # if facter is installed, and we can use --json because
        # ruby-json is ALSO installed, include facter data in the JSON
        rc, out, err = module.run_command(facter_path + " --puppet --json")

        # for some versions of facter, --puppet returns an error if puppet is not present,
        # try again w/o it, other errors should still appear and be sent back
        if rc != 0:
            rc, out, err = module.run_command(facter_path + " --json")

        return rc, out, err

    def get_facter_output(self, module):
        facter_path = self.find_facter(module)
        if not facter_path:
            return None

        rc, out, err = self.run_facter(module, facter_path)

        if rc != 0:
            return None

        return out

    def collect(self, module=None, collected_facts=None):
        # Note that this mirrors previous facter behavior, where there isn't
        # an 'ansible_facter' key in the main fact dict, but instead 'facter_whatever'
        # items are added to the main dict.
        facter_dict = {}

        if not module:
            return facter_dict

        facter_output = self.get_facter_output(module)

        # TODO: if we fail, should we add an empty facter key or nothing?
        if facter_output is None:
            return facter_dict

        try:
            facter_dict = json.loads(facter_output)
        except Exception:
            module.warn("Failed to parse facter facts")

        return facter_dict
2,770
Python
.py
61
35.540984
92
0.625465
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
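The same probe-and-retry flow as run_facter() above, sketched with the standard library instead of AnsibleModule.run_command (the facter flags and the /opt/puppetlabs/bin lookup mirror the collector; everything else is illustrative):

import json
import shutil
import subprocess

facter = shutil.which('facter', path='/opt/puppetlabs/bin:/usr/bin:/usr/local/bin')
if facter:
    proc = subprocess.run([facter, '--puppet', '--json'], capture_output=True, text=True)
    if proc.returncode != 0:
        # some facter versions error on --puppet when puppet is absent; retry without it
        proc = subprocess.run([facter, '--json'], capture_output=True, text=True)
    if proc.returncode == 0:
        facts = json.loads(proc.stdout)
        print(sorted(facts.keys())[:5])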
14,296
ohai.py
ansible_ansible/lib/ansible/module_utils/facts/other/ohai.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import json

import ansible.module_utils.compat.typing as t

from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector


class OhaiFactCollector(BaseFactCollector):
    """This is a subclass of Facts for including information gathered from Ohai."""
    name = 'ohai'
    _fact_ids = set()  # type: t.Set[str]

    def __init__(self, collectors=None, namespace=None):
        namespace = PrefixFactNamespace(namespace_name='ohai',
                                        prefix='ohai_')
        super(OhaiFactCollector, self).__init__(collectors=collectors,
                                                namespace=namespace)

    def find_ohai(self, module):
        return module.get_bin_path(
            'ohai'
        )

    def run_ohai(self, module, ohai_path):
        rc, out, err = module.run_command(ohai_path)
        return rc, out, err

    def get_ohai_output(self, module):
        ohai_path = self.find_ohai(module)
        if not ohai_path:
            return None

        rc, out, err = self.run_ohai(module, ohai_path)
        if rc != 0:
            return None

        return out

    def collect(self, module=None, collected_facts=None):
        ohai_facts = {}
        if not module:
            return ohai_facts

        ohai_output = self.get_ohai_output(module)

        if ohai_output is None:
            return ohai_facts

        try:
            ohai_facts = json.loads(ohai_output)
        except Exception:
            module.warn("Failed to gather ohai facts")

        return ohai_facts
2,294
Python
.py
55
34.054545
83
0.663215
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,297
linux.py
ansible_ansible/lib/ansible/module_utils/facts/hardware/linux.py
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import collections import errno import glob import json import os import re import sys import time from ansible.module_utils._internal._concurrent import _futures from ansible.module_utils.common.locale import get_best_parsable_locale from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.common.text.formatters import bytes_to_human from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size from ansible.module_utils.six import iteritems # import this as a module to ensure we get the same module instance from ansible.module_utils.facts import timeout def get_partition_uuid(partname): try: uuids = os.listdir("/dev/disk/by-uuid") except OSError: return for uuid in uuids: dev = os.path.realpath("/dev/disk/by-uuid/" + uuid) if dev == ("/dev/" + partname): return uuid return None class LinuxHardware(Hardware): """ Linux-specific subclass of Hardware. Defines memory and CPU facts: - memfree_mb - memtotal_mb - swapfree_mb - swaptotal_mb - processor (a list) - processor_cores - processor_count In addition, it also defines number of DMI facts and device facts. 
""" platform = 'Linux' # Originally only had these four as toplevelfacts ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree')) # Now we have all of these in a dict structure MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached')) # regex used against findmnt output to detect bind mounts BIND_MOUNT_RE = re.compile(r'.*\]') # regex used against mtab content to find entries that are bind mounts MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"') # regex used for replacing octal escape sequences OCTAL_ESCAPE_RE = re.compile(r'\\[0-9]{3}') def populate(self, collected_facts=None): hardware_facts = {} locale = get_best_parsable_locale(self.module) self.module.run_command_environ_update = {'LANG': locale, 'LC_ALL': locale, 'LC_NUMERIC': locale} cpu_facts = self.get_cpu_facts(collected_facts=collected_facts) memory_facts = self.get_memory_facts() dmi_facts = self.get_dmi_facts() sysinfo_facts = self.get_sysinfo_facts() device_facts = self.get_device_facts() uptime_facts = self.get_uptime_facts() lvm_facts = self.get_lvm_facts() mount_facts = {} try: mount_facts = self.get_mount_facts() except timeout.TimeoutError: self.module.warn("No mount facts were gathered due to timeout.") hardware_facts.update(cpu_facts) hardware_facts.update(memory_facts) hardware_facts.update(dmi_facts) hardware_facts.update(sysinfo_facts) hardware_facts.update(device_facts) hardware_facts.update(uptime_facts) hardware_facts.update(lvm_facts) hardware_facts.update(mount_facts) return hardware_facts def get_memory_facts(self): memory_facts = {} if not os.access("/proc/meminfo", os.R_OK): return memory_facts memstats = {} for line in get_file_lines("/proc/meminfo"): data = line.split(":", 1) key = data[0] if key in self.ORIGINAL_MEMORY_FACTS: val = data[1].strip().split(' ')[0] memory_facts["%s_mb" % key.lower()] = int(val) // 1024 if key in self.MEMORY_FACTS: val = data[1].strip().split(' ')[0] memstats[key.lower()] = int(val) // 1024 if None not in (memstats.get('memtotal'), memstats.get('memfree')): memstats['real:used'] = memstats['memtotal'] - memstats['memfree'] if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')): memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers'] if None not in (memstats.get('memtotal'), memstats.get('nocache:free')): memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free'] if None not in (memstats.get('swaptotal'), memstats.get('swapfree')): memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree'] memory_facts['memory_mb'] = { 'real': { 'total': memstats.get('memtotal'), 'used': memstats.get('real:used'), 'free': memstats.get('memfree'), }, 'nocache': { 'free': memstats.get('nocache:free'), 'used': memstats.get('nocache:used'), }, 'swap': { 'total': memstats.get('swaptotal'), 'free': memstats.get('swapfree'), 'used': memstats.get('swap:used'), 'cached': memstats.get('swapcached'), }, } return memory_facts def get_cpu_facts(self, collected_facts=None): cpu_facts = {} collected_facts = collected_facts or {} i = 0 vendor_id_occurrence = 0 model_name_occurrence = 0 processor_occurrence = 0 physid = 0 coreid = 0 sockets = {} cores = {} zp = 0 zmt = 0 xen = False xen_paravirt = False try: if os.path.exists('/proc/xen'): xen = True else: for line in get_file_lines('/sys/hypervisor/type'): if line.strip() == 'xen': xen = True # Only interested in the first line break except IOError: pass if not os.access("/proc/cpuinfo", os.R_OK): return cpu_facts 
cpu_facts['processor'] = [] for line in get_file_lines('/proc/cpuinfo'): data = line.split(":", 1) key = data[0].strip() try: val = data[1].strip() except IndexError: val = "" if xen: if key == 'flags': # Check for vme cpu flag, Xen paravirt does not expose this. # Need to detect Xen paravirt because it exposes cpuinfo # differently than Xen HVM or KVM and causes reporting of # only a single cpu core. if 'vme' not in val: xen_paravirt = True if key == "flags": cpu_facts['flags'] = val.split() # model name is for Intel arch, Processor (mind the uppercase P) # works for some ARM devices, like the Sheevaplug. if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']: if 'processor' not in cpu_facts: cpu_facts['processor'] = [] cpu_facts['processor'].append(val) if key == 'vendor_id': vendor_id_occurrence += 1 if key == 'model name': model_name_occurrence += 1 if key == 'processor': processor_occurrence += 1 i += 1 elif key == 'physical id': physid = val if physid not in sockets: sockets[physid] = 1 elif key == 'core id': coreid = val if coreid not in sockets: cores[coreid] = 1 elif key == 'cpu cores': sockets[physid] = int(val) elif key == 'siblings': cores[coreid] = int(val) # S390x classic cpuinfo elif key == '# processors': zp = int(val) elif key == 'max thread id': zmt = int(val) + 1 # SPARC elif key == 'ncpus active': i = int(val) # Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le) if vendor_id_occurrence > 0: if vendor_id_occurrence == model_name_occurrence: i = vendor_id_occurrence # The fields for ARM CPUs do not always include 'vendor_id' or 'model name', # and sometimes includes both 'processor' and 'Processor'. # The fields for Power CPUs include 'processor' and 'cpu'. # Always use 'processor' count for ARM and Power systems if collected_facts.get('ansible_architecture', '').startswith(('armv', 'aarch', 'ppc')): i = processor_occurrence if collected_facts.get('ansible_architecture') == 's390x': # getting sockets would require 5.7+ with CONFIG_SCHED_TOPOLOGY cpu_facts['processor_count'] = 1 cpu_facts['processor_cores'] = round(zp / zmt) cpu_facts['processor_threads_per_core'] = zmt cpu_facts['processor_vcpus'] = zp cpu_facts['processor_nproc'] = zp else: if xen_paravirt: cpu_facts['processor_count'] = i cpu_facts['processor_cores'] = i cpu_facts['processor_threads_per_core'] = 1 cpu_facts['processor_vcpus'] = i cpu_facts['processor_nproc'] = i else: if sockets: cpu_facts['processor_count'] = len(sockets) else: cpu_facts['processor_count'] = i socket_values = list(sockets.values()) if socket_values and socket_values[0]: cpu_facts['processor_cores'] = socket_values[0] else: cpu_facts['processor_cores'] = 1 core_values = list(cores.values()) if core_values: cpu_facts['processor_threads_per_core'] = round(core_values[0] / cpu_facts['processor_cores']) else: cpu_facts['processor_threads_per_core'] = round(1 / cpu_facts['processor_cores']) cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] * cpu_facts['processor_count'] * cpu_facts['processor_cores']) cpu_facts['processor_nproc'] = processor_occurrence # if the number of processors available to the module's # thread cannot be determined, the processor count # reported by /proc will be the default (as previously defined) try: cpu_facts['processor_nproc'] = len( os.sched_getaffinity(0) ) except AttributeError: # In Python < 3.3, os.sched_getaffinity() is not available nproc_cmd = self.module.get_bin_path('nproc') if nproc_cmd is not None: rc, out, _err = 
self.module.run_command(nproc_cmd) if rc == 0: cpu_facts['processor_nproc'] = int(out) return cpu_facts def get_dmi_facts(self): """ learn dmi facts from system Try /sys first for dmi related facts. If that is not available, fall back to dmidecode executable """ dmi_facts = {} if os.path.exists('/sys/devices/virtual/dmi/id/product_name'): # Use kernel DMI info, if available # DMI SPEC -- https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.2.0.pdf FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop", "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower", "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station", "All In One", "Sub Notebook", "Space-saving", "Lunch Box", "Main Server Chassis", "Expansion Chassis", "Sub Chassis", "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis", "Rack Mount Chassis", "Sealed-case PC", "Multi-system", "CompactPCI", "AdvancedTCA", "Blade", "Blade Enclosure", "Tablet", "Convertible", "Detachable", "IoT Gateway", "Embedded PC", "Mini PC", "Stick PC"] DMI_DICT = { 'bios_date': '/sys/devices/virtual/dmi/id/bios_date', 'bios_vendor': '/sys/devices/virtual/dmi/id/bios_vendor', 'bios_version': '/sys/devices/virtual/dmi/id/bios_version', 'board_asset_tag': '/sys/devices/virtual/dmi/id/board_asset_tag', 'board_name': '/sys/devices/virtual/dmi/id/board_name', 'board_serial': '/sys/devices/virtual/dmi/id/board_serial', 'board_vendor': '/sys/devices/virtual/dmi/id/board_vendor', 'board_version': '/sys/devices/virtual/dmi/id/board_version', 'chassis_asset_tag': '/sys/devices/virtual/dmi/id/chassis_asset_tag', 'chassis_serial': '/sys/devices/virtual/dmi/id/chassis_serial', 'chassis_vendor': '/sys/devices/virtual/dmi/id/chassis_vendor', 'chassis_version': '/sys/devices/virtual/dmi/id/chassis_version', 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type', 'product_name': '/sys/devices/virtual/dmi/id/product_name', 'product_serial': '/sys/devices/virtual/dmi/id/product_serial', 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid', 'product_version': '/sys/devices/virtual/dmi/id/product_version', 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor', } for (key, path) in DMI_DICT.items(): data = get_file_content(path) if data is not None: if key == 'form_factor': try: dmi_facts['form_factor'] = FORM_FACTOR[int(data)] except IndexError: dmi_facts['form_factor'] = 'unknown (%s)' % data else: dmi_facts[key] = data else: dmi_facts[key] = 'NA' else: # Fall back to using dmidecode, if available DMI_DICT = { 'bios_date': 'bios-release-date', 'bios_vendor': 'bios-vendor', 'bios_version': 'bios-version', 'board_asset_tag': 'baseboard-asset-tag', 'board_name': 'baseboard-product-name', 'board_serial': 'baseboard-serial-number', 'board_vendor': 'baseboard-manufacturer', 'board_version': 'baseboard-version', 'chassis_asset_tag': 'chassis-asset-tag', 'chassis_serial': 'chassis-serial-number', 'chassis_vendor': 'chassis-manufacturer', 'chassis_version': 'chassis-version', 'form_factor': 'chassis-type', 'product_name': 'system-product-name', 'product_serial': 'system-serial-number', 'product_uuid': 'system-uuid', 'product_version': 'system-version', 'system_vendor': 'system-manufacturer', } dmi_bin = self.module.get_bin_path('dmidecode') if dmi_bin is None: dmi_facts = dict.fromkeys( DMI_DICT.keys(), 'NA' ) return dmi_facts for (k, v) in DMI_DICT.items(): (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) if rc == 0: # Strip out commented lines (specific dmidecode output) thisvalue = ''.join([line for line in 
    def get_dmi_facts(self):
        """ learn dmi facts from system

        Try /sys first for dmi related facts.
        If that is not available, fall back to dmidecode executable """

        dmi_facts = {}

        if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
            # Use kernel DMI info, if available

            # DMI SPEC -- https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.2.0.pdf
            FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop",
                           "Low Profile Desktop", "Pizza Box", "Mini Tower",
                           "Tower", "Portable", "Laptop", "Notebook", "Hand Held",
                           "Docking Station", "All In One", "Sub Notebook",
                           "Space-saving", "Lunch Box", "Main Server Chassis",
                           "Expansion Chassis", "Sub Chassis",
                           "Bus Expansion Chassis", "Peripheral Chassis",
                           "RAID Chassis", "Rack Mount Chassis", "Sealed-case PC",
                           "Multi-system", "CompactPCI", "AdvancedTCA", "Blade",
                           "Blade Enclosure", "Tablet", "Convertible", "Detachable",
                           "IoT Gateway", "Embedded PC", "Mini PC", "Stick PC"]

            DMI_DICT = {
                'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
                'bios_vendor': '/sys/devices/virtual/dmi/id/bios_vendor',
                'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
                'board_asset_tag': '/sys/devices/virtual/dmi/id/board_asset_tag',
                'board_name': '/sys/devices/virtual/dmi/id/board_name',
                'board_serial': '/sys/devices/virtual/dmi/id/board_serial',
                'board_vendor': '/sys/devices/virtual/dmi/id/board_vendor',
                'board_version': '/sys/devices/virtual/dmi/id/board_version',
                'chassis_asset_tag': '/sys/devices/virtual/dmi/id/chassis_asset_tag',
                'chassis_serial': '/sys/devices/virtual/dmi/id/chassis_serial',
                'chassis_vendor': '/sys/devices/virtual/dmi/id/chassis_vendor',
                'chassis_version': '/sys/devices/virtual/dmi/id/chassis_version',
                'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
                'product_name': '/sys/devices/virtual/dmi/id/product_name',
                'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
                'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
                'product_version': '/sys/devices/virtual/dmi/id/product_version',
                'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor',
            }

            for (key, path) in DMI_DICT.items():
                data = get_file_content(path)
                if data is not None:
                    if key == 'form_factor':
                        try:
                            dmi_facts['form_factor'] = FORM_FACTOR[int(data)]
                        except IndexError:
                            dmi_facts['form_factor'] = 'unknown (%s)' % data
                    else:
                        dmi_facts[key] = data
                else:
                    dmi_facts[key] = 'NA'

        else:
            # Fall back to using dmidecode, if available
            DMI_DICT = {
                'bios_date': 'bios-release-date',
                'bios_vendor': 'bios-vendor',
                'bios_version': 'bios-version',
                'board_asset_tag': 'baseboard-asset-tag',
                'board_name': 'baseboard-product-name',
                'board_serial': 'baseboard-serial-number',
                'board_vendor': 'baseboard-manufacturer',
                'board_version': 'baseboard-version',
                'chassis_asset_tag': 'chassis-asset-tag',
                'chassis_serial': 'chassis-serial-number',
                'chassis_vendor': 'chassis-manufacturer',
                'chassis_version': 'chassis-version',
                'form_factor': 'chassis-type',
                'product_name': 'system-product-name',
                'product_serial': 'system-serial-number',
                'product_uuid': 'system-uuid',
                'product_version': 'system-version',
                'system_vendor': 'system-manufacturer',
            }
            dmi_bin = self.module.get_bin_path('dmidecode')
            if dmi_bin is None:
                dmi_facts = dict.fromkeys(
                    DMI_DICT.keys(),
                    'NA'
                )
                return dmi_facts

            for (k, v) in DMI_DICT.items():
                (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
                if rc == 0:
                    # Strip out commented lines (specific dmidecode output)
                    thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
                    try:
                        json.dumps(thisvalue)
                    except UnicodeDecodeError:
                        thisvalue = "NA"

                    dmi_facts[k] = thisvalue
                else:
                    dmi_facts[k] = 'NA'

        return dmi_facts

    def get_sysinfo_facts(self):
        """Fetch /proc/sysinfo facts from s390 Linux on IBM Z"""
        if not os.path.exists('/proc/sysinfo'):
            return {}

        sysinfo_facts = dict.fromkeys(
            ('system_vendor', 'product_version', 'product_serial', 'product_name', 'product_uuid'),
            'NA'
        )
        sysinfo_re = re.compile(
            r"""
                ^
                    (?:Manufacturer:\s+(?P<system_vendor>.+))|
                    (?:Type:\s+(?P<product_name>.+))|
                    (?:Sequence\ Code:\s+0+(?P<product_serial>.+))
                $
            """,
            re.VERBOSE | re.MULTILINE
        )
        data = get_file_content('/proc/sysinfo')
        for match in sysinfo_re.finditer(data):
            sysinfo_facts.update({k: v for k, v in match.groupdict().items() if v is not None})
        return sysinfo_facts
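    # Illustrative parse (hypothetical values): given /proc/sysinfo lines such as
    #   Manufacturer:         IBM
    #   Type:                 8561
    #   Sequence Code:        00000000000A1234
    # the regex above yields system_vendor='IBM', product_name='8561' and
    # product_serial='A1234' (the 0+ strips leading zeros); keys with no
    # matching line keep their 'NA' default.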
    def _run_lsblk(self, lsblk_path):
        # call lsblk and collect all uuids
        # --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
        # this uses the linux major device number
        # for details see https://www.kernel.org/doc/Documentation/devices.txt
        args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
        cmd = [lsblk_path] + args
        rc, out, err = self.module.run_command(cmd)
        return rc, out, err

    def _lsblk_uuid(self):
        uuids = {}
        lsblk_path = self.module.get_bin_path("lsblk")
        if not lsblk_path:
            return uuids

        rc, out, err = self._run_lsblk(lsblk_path)
        if rc != 0:
            return uuids

        # each line will be in format:
        # <devicename><some whitespace><uuid>
        # /dev/sda1  32caaec3-ef40-4691-a3b6-438c3f9bc1c0
        for lsblk_line in out.splitlines():
            if not lsblk_line:
                continue

            line = lsblk_line.strip()
            fields = line.rsplit(None, 1)

            if len(fields) < 2:
                continue

            device_name, uuid = fields[0].strip(), fields[1].strip()
            if device_name in uuids:
                continue
            uuids[device_name] = uuid

        return uuids

    def _udevadm_uuid(self, device):
        # fallback for versions of lsblk <= 2.23 that don't have --paths, see _run_lsblk() above
        uuid = 'N/A'

        udevadm_path = self.module.get_bin_path('udevadm')
        if not udevadm_path:
            return uuid

        cmd = [udevadm_path, 'info', '--query', 'property', '--name', device]
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            return uuid

        # a snippet of the output of the udevadm command below will be:
        # ...
        # ID_FS_TYPE=ext4
        # ID_FS_USAGE=filesystem
        # ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179
        # ...
        m = re.search('ID_FS_UUID=(.*)\n', out)
        if m:
            uuid = m.group(1)

        return uuid

    def _run_findmnt(self, findmnt_path):
        args = ['--list', '--noheadings', '--notruncate']
        cmd = [findmnt_path] + args
        rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
        return rc, out, err

    def _find_bind_mounts(self):
        bind_mounts = set()
        findmnt_path = self.module.get_bin_path("findmnt")
        if not findmnt_path:
            return bind_mounts

        rc, out, err = self._run_findmnt(findmnt_path)
        if rc != 0:
            return bind_mounts

        # find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
        for line in out.splitlines():
            fields = line.split()
            # fields[0] is the TARGET, fields[1] is the SOURCE
            if len(fields) < 2:
                continue

            # bind mounts will have a [/directory_name] in the SOURCE column
            if self.BIND_MOUNT_RE.match(fields[1]):
                bind_mounts.add(fields[0])

        return bind_mounts

    def _mtab_entries(self):
        mtab_file = '/etc/mtab'
        if not os.path.exists(mtab_file):
            mtab_file = '/proc/mounts'

        mtab = get_file_content(mtab_file, '')
        mtab_entries = []
        for line in mtab.splitlines():
            fields = line.split()
            if len(fields) < 4:
                continue
            mtab_entries.append(fields)
        return mtab_entries

    @staticmethod
    def _replace_octal_escapes_helper(match):
        # Convert to integer using base8 and then convert to character
        return chr(int(match.group()[1:], 8))

    def _replace_octal_escapes(self, value):
        return self.OCTAL_ESCAPE_RE.sub(self._replace_octal_escapes_helper, value)
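    # Illustrative conversion (hypothetical value): /proc/mounts escapes blanks
    # in mount points octally, so '/mnt/disk\040one' becomes '/mnt/disk one',
    # since chr(int('040', 8)) == chr(32) == ' '.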
    def get_mount_info(self, mount, device, uuids):

        mount_size = get_mount_size(mount)

        # _udevadm_uuid is a fallback for versions of lsblk <= 2.23 that don't have --paths
        # see _run_lsblk() above
        # https://github.com/ansible/ansible/issues/36077
        uuid = uuids.get(device, self._udevadm_uuid(device))

        return mount_size, uuid

    def get_mount_facts(self):

        mounts = []

        # gather system lists
        bind_mounts = self._find_bind_mounts()
        uuids = self._lsblk_uuid()
        mtab_entries = self._mtab_entries()

        # start threads to query each mount
        results = {}
        executor = _futures.DaemonThreadPoolExecutor()
        maxtime = timeout.GATHER_TIMEOUT or timeout.DEFAULT_GATHER_TIMEOUT
        for fields in mtab_entries:
            # Transform octal escape sequences
            fields = [self._replace_octal_escapes(field) for field in fields]

            device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
            dump, passno = int(fields[4]), int(fields[5])

            if not device.startswith(('/', '\\')) and ':/' not in device or fstype == 'none':
                continue

            mount_info = {'mount': mount,
                          'device': device,
                          'fstype': fstype,
                          'options': options,
                          'dump': dump,
                          'passno': passno}

            if mount in bind_mounts:
                # only add if not already there, we might have a plain /etc/mtab
                if not self.MTAB_BIND_MOUNT_RE.match(options):
                    mount_info['options'] += ",bind"

            results[mount] = {'info': mount_info, 'timelimit': time.monotonic() + maxtime}
            results[mount]['extra'] = executor.submit(self.get_mount_info, mount, device, uuids)

        # done with spawning new workers, start gc
        executor.shutdown()

        while results:
            # wait for workers and get results
            for mount in list(results):
                done = False
                res = results[mount]['extra']
                try:
                    if res.done():
                        done = True
                        if res.exception() is None:
                            mount_size, uuid = res.result()
                            if mount_size:
                                results[mount]['info'].update(mount_size)
                            results[mount]['info']['uuid'] = uuid or 'N/A'
                        else:
                            # failed, try to find out why, if 'res.successful' we know there are no exceptions
                            results[mount]['info']['note'] = f'Could not get extra information: {res.exception()}'

                    elif time.monotonic() > results[mount]['timelimit']:
                        done = True
                        self.module.warn("Timeout exceeded when getting mount info for %s" % mount)
                        results[mount]['info']['note'] = 'Could not get extra information due to timeout'
                except Exception as e:
                    import traceback
                    done = True
                    results[mount]['info'] = 'N/A'
                    self.module.warn("Error prevented getting extra info for mount %s: [%s] %s." % (mount, type(e), to_text(e)))
                    self.module.debug(traceback.format_exc())

                if done:
                    # move results outside and make loop only handle pending
                    mounts.append(results[mount]['info'])
                    del results[mount]

            # avoid cpu churn, sleep between retrying for loop with remaining mounts
            time.sleep(0.1)

        return {'mounts': mounts}

    def get_device_links(self, link_dir):
        if not os.path.exists(link_dir):
            return {}
        try:
            retval = collections.defaultdict(set)
            for entry in os.listdir(link_dir):
                try:
                    target = os.path.basename(os.readlink(os.path.join(link_dir, entry)))
                    retval[target].add(entry)
                except OSError:
                    continue
            return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
        except OSError:
            return {}

    def get_all_device_owners(self):
        try:
            retval = collections.defaultdict(set)
            for path in glob.glob('/sys/block/*/slaves/*'):
                elements = path.split('/')
                device = elements[3]
                target = elements[5]
                retval[target].add(device)
            return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
        except OSError:
            return {}

    def get_all_device_links(self):
        return {
            'ids': self.get_device_links('/dev/disk/by-id'),
            'uuids': self.get_device_links('/dev/disk/by-uuid'),
            'labels': self.get_device_links('/dev/disk/by-label'),
            'masters': self.get_all_device_owners(),
        }

    def get_holders(self, block_dev_dict, sysdir):
        block_dev_dict['holders'] = []
        if os.path.isdir(sysdir + "/holders"):
            for folder in os.listdir(sysdir + "/holders"):
                if not folder.startswith("dm-"):
                    continue
                name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
                if name:
                    block_dev_dict['holders'].append(name)
                else:
                    block_dev_dict['holders'].append(folder)

    def _get_sg_inq_serial(self, sg_inq, block):
        device = "/dev/%s" % (block)
        rc, drivedata, err = self.module.run_command([sg_inq, device])
        if rc == 0:
            serial = re.search(r"(?:Unit serial|Serial) number:\s+(\w+)", drivedata)
            if serial:
                return serial.group(1)
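    # Illustrative match (hypothetical output): `sg_inq /dev/sda` prints a line
    # such as 'Unit serial number: 3EK1J5TF', from which the regex above
    # extracts '3EK1J5TF' as the drive serial.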
serial number from /sys/block/<name>/device/serial serial_path = "/sys/block/%s/device/serial" % (block) if sg_inq: serial = self._get_sg_inq_serial(sg_inq, block) if serial: d['serial'] = serial else: serial = get_file_content(serial_path) if serial: d['serial'] = serial d['removable'] = get_file_content(sysdir + '/removable') # Historically, `support_discard` simply returned the value of # `/sys/block/{device}/queue/discard_granularity`. When its value # is `0`, then the block device doesn't support discards; # _however_, it being greater than zero doesn't necessarily mean # that the block device _does_ support discards. # # Another indication that a block device doesn't support discards # is `/sys/block/{device}/queue/discard_max_hw_bytes` being equal # to `0` (with the same caveat as above). So if either of those are # `0`, set `support_discard` to zero, otherwise set it to the value # of `discard_granularity` for backwards compatibility. d['support_discard'] = ( '0' if get_file_content(sysdir + '/queue/discard_max_hw_bytes') == '0' else get_file_content(sysdir + '/queue/discard_granularity') ) if diskname in devs_wwn: d['wwn'] = devs_wwn[diskname] d['partitions'] = {} for folder in os.listdir(sysdir): m = re.search("(" + diskname + r"[p]?\d+)", folder) if m: part = {} partname = m.group(1) part_sysdir = sysdir + "/" + partname part['links'] = {} for (link_type, link_values) in iteritems(links): part['links'][link_type] = link_values.get(partname, []) part['start'] = get_file_content(part_sysdir + "/start", 0) part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size") if not part['sectorsize']: part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512) # sysfs sectorcount assumes 512 blocksize. Convert using the correct sectorsize part['sectors'] = int(get_file_content(part_sysdir + "/size", 0)) * 512 // int(part['sectorsize']) part['size'] = bytes_to_human(float(part['sectors']) * float(part['sectorsize'])) part['uuid'] = get_partition_uuid(partname) self.get_holders(part, part_sysdir) d['partitions'][partname] = part d['rotational'] = get_file_content(sysdir + "/queue/rotational") d['scheduler_mode'] = "" scheduler = get_file_content(sysdir + "/queue/scheduler") if scheduler is not None: m = re.match(r".*?(\[(.*)\])", scheduler) if m: d['scheduler_mode'] = m.group(2) d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size") if not d['sectorsize']: d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512) # sysfs sectorcount assumes 512 blocksize. Convert using the correct sectorsize d['sectors'] = int(get_file_content(sysdir + "/size")) * 512 // int(d['sectorsize']) if not d['sectors']: d['sectors'] = 0 d['size'] = bytes_to_human(float(d['sectors']) * float(d['sectorsize'])) d['host'] = "" # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7). 
            m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
            if m and pcidata:
                pciid = m.group(1)
                did = re.escape(pciid)
                m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE)
                if m:
                    d['host'] = m.group(1)

            self.get_holders(d, sysdir)

            device_facts['devices'][diskname] = d

        return device_facts

    def get_uptime_facts(self):
        uptime_facts = {}
        uptime_file_content = get_file_content('/proc/uptime')
        if uptime_file_content:
            uptime_seconds_string = uptime_file_content.split(' ')[0]
            uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string))
        return uptime_facts

    def _find_mapper_device_name(self, dm_device):
        dm_prefix = '/dev/dm-'
        mapper_device = dm_device
        if dm_device.startswith(dm_prefix):
            dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
            mapper_prefix = '/dev/mapper/'
            rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
            if rc == 0:
                mapper_device = mapper_prefix + dm_name.rstrip()
        return mapper_device

    def get_lvm_facts(self):
        """ Get LVM Facts if running as root and lvm utils are available """

        lvm_facts = {'lvm': 'N/A'}
        vgs_cmd = self.module.get_bin_path('vgs')
        if vgs_cmd is None:
            return lvm_facts

        if os.getuid() == 0:
            lvm_util_options = '--noheadings --nosuffix --units g --separator ,'

            # vgs fields: VG #PV #LV #SN Attr VSize VFree
            vgs = {}
            rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_cmd, lvm_util_options))
            for vg_line in vg_lines.splitlines():
                items = vg_line.strip().split(',')
                vgs[items[0]] = {
                    'size_g': items[-2],
                    'free_g': items[-1],
                    'num_lvs': items[2],
                    'num_pvs': items[1]
                }

            lvs_path = self.module.get_bin_path('lvs')
            # lvs fields:
            # LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
            lvs = {}
            if lvs_path:
                rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))
                for lv_line in lv_lines.splitlines():
                    items = lv_line.strip().split(',')
                    lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}

            pvs_path = self.module.get_bin_path('pvs')
            # pvs fields: PV VG #Fmt #Attr PSize PFree
            pvs = {}
            if pvs_path:
                rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))
                for pv_line in pv_lines.splitlines():
                    items = pv_line.strip().split(',')
                    pvs[self._find_mapper_device_name(items[0])] = {
                        'size_g': items[4],
                        'free_g': items[5],
                        'vg': items[1]}

            lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}

        return lvm_facts


class LinuxHardwareCollector(HardwareCollector):
    _platform = 'Linux'
    _fact_class = LinuxHardware

    required_facts = set(['platform'])
37,316
Python
.py
785
33.987261
128
0.53453
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,298
hurd.py
ansible_ansible/lib/ansible/module_utils/facts/hardware/hurd.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

from ansible.module_utils.facts.timeout import TimeoutError
from ansible.module_utils.facts.hardware.base import HardwareCollector
from ansible.module_utils.facts.hardware.linux import LinuxHardware


class HurdHardware(LinuxHardware):
    """
    GNU Hurd specific subclass of Hardware. Define memory and mount facts
    based on procfs compatibility translator mimicking the interface of
    the Linux kernel.
    """

    platform = 'GNU'

    def populate(self, collected_facts=None):
        hardware_facts = {}
        uptime_facts = self.get_uptime_facts()
        memory_facts = self.get_memory_facts()

        mount_facts = {}
        try:
            mount_facts = self.get_mount_facts()
        except TimeoutError:
            pass

        hardware_facts.update(uptime_facts)
        hardware_facts.update(memory_facts)
        hardware_facts.update(mount_facts)

        return hardware_facts


class HurdHardwareCollector(HardwareCollector):
    _fact_class = HurdHardware
    _platform = 'GNU'
1,700
Python
.py
41
36.780488
73
0.739078
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,299
sunos.py
ansible_ansible/lib/ansible/module_utils/facts/hardware/sunos.py
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import re
import time

from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.text.formatters import bytes_to_human
from ansible.module_utils.facts.utils import get_file_content, get_mount_size
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts import timeout
from ansible.module_utils.six.moves import reduce


class SunOSHardware(Hardware):
    """
    In addition to the generic memory and cpu facts, this also sets
    swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
    """

    platform = 'SunOS'

    def populate(self, collected_facts=None):
        hardware_facts = {}

        # FIXME: could pass to run_command(environ_update), but it also tweaks the env
        #        of the parent process instead of altering an env provided to Popen()
        # Use C locale for hardware collection helpers to avoid locale specific number formatting (#24542)
        locale = get_best_parsable_locale(self.module)
        self.module.run_command_environ_update = {'LANG': locale, 'LC_ALL': locale, 'LC_NUMERIC': locale}

        cpu_facts = self.get_cpu_facts()
        memory_facts = self.get_memory_facts()
        dmi_facts = self.get_dmi_facts()
        device_facts = self.get_device_facts()
        uptime_facts = self.get_uptime_facts()

        mount_facts = {}
        try:
            mount_facts = self.get_mount_facts()
        except timeout.TimeoutError:
            pass

        hardware_facts.update(cpu_facts)
        hardware_facts.update(memory_facts)
        hardware_facts.update(dmi_facts)
        hardware_facts.update(device_facts)
        hardware_facts.update(uptime_facts)
        hardware_facts.update(mount_facts)

        return hardware_facts

    def get_cpu_facts(self, collected_facts=None):
        physid = 0
        sockets = {}

        cpu_facts = {}
        collected_facts = collected_facts or {}

        rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")

        cpu_facts['processor'] = []

        for line in out.splitlines():
            if len(line) < 1:
                continue

            data = line.split(None, 1)
            key = data[0].strip()

            # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
            if key == 'module:':
                brand = ''
            elif key == 'brand':
                brand = data[1].strip()
            elif key == 'clock_MHz':
                clock_mhz = data[1].strip()
            elif key == 'implementation':
                processor = brand or data[1].strip()
                # Add clock speed to description for SPARC CPU
                # FIXME
                if collected_facts.get('ansible_machine') != 'i86pc':
                    processor += " @ " + clock_mhz + "MHz"
                if 'ansible_processor' not in collected_facts:
                    cpu_facts['processor'] = []
                cpu_facts['processor'].append(processor)
            elif key == 'chip_id':
                physid = data[1].strip()
                if physid not in sockets:
                    sockets[physid] = 1
                else:
                    sockets[physid] += 1

        # Counting cores on Solaris can be complicated.
        # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
        # Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
        # these processors have: sockets -> cores -> threads/virtual CPU.
        if len(sockets) > 0:
            cpu_facts['processor_count'] = len(sockets)
            cpu_facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
        else:
            cpu_facts['processor_cores'] = 'NA'
            cpu_facts['processor_count'] = len(cpu_facts['processor'])

        return cpu_facts
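    # Illustrative input (hypothetical values, schematic layout): `kstat cpu_info`
    # emits per-CPU blocks along the lines of
    #   module: cpu_info    instance: 0
    #   brand           SPARC-T4
    #   chip_id         0
    #   clock_MHz       2848
    #   implementation  SPARC-T4 (chipid 0, clock 2848 MHz)
    # 'chip_id' groups virtual CPUs into sockets; 'brand' (or 'implementation'
    # on Solaris 9) names the processor.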
    def get_memory_facts(self):
        memory_facts = {}

        rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])

        for line in out.splitlines():
            if 'Memory size' in line:
                memory_facts['memtotal_mb'] = int(line.split()[2])

        rc, out, err = self.module.run_command("/usr/sbin/swap -s")

        allocated = int(out.split()[1][:-1])
        reserved = int(out.split()[5][:-1])
        used = int(out.split()[8][:-1])
        free = int(out.split()[10][:-1])

        memory_facts['swapfree_mb'] = free // 1024
        memory_facts['swaptotal_mb'] = (free + used) // 1024
        memory_facts['swap_allocated_mb'] = allocated // 1024
        memory_facts['swap_reserved_mb'] = reserved // 1024

        return memory_facts
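    # Illustrative parse (hypothetical output): `swap -s` prints a single line like
    #   total: 41635k bytes allocated + 3224k reserved = 44859k used, 1329936k available
    # so allocated=41635, reserved=3224, used=44859 and free=1329936 (KiB), giving
    # swapfree_mb = 1329936 // 1024 = 1298 and
    # swaptotal_mb = (1329936 + 44859) // 1024 = 1342.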
    @timeout.timeout()
    def get_mount_facts(self):
        mount_facts = {}
        mount_facts['mounts'] = []

        # For a detailed format description see mnttab(4)
        #   special mount_point fstype options time
        fstab = get_file_content('/etc/mnttab')

        if fstab:
            for line in fstab.splitlines():
                fields = line.split('\t')
                mount_statvfs_info = get_mount_size(fields[1])
                mount_info = {'mount': fields[1],
                              'device': fields[0],
                              'fstype': fields[2],
                              'options': fields[3],
                              'time': fields[4]}
                mount_info.update(mount_statvfs_info)
                mount_facts['mounts'].append(mount_info)

        return mount_facts

    def get_dmi_facts(self):
        dmi_facts = {}

        # On Solaris 8 the prtdiag wrapper is absent from /usr/sbin,
        # but that's okay, because we know where to find the real thing:
        rc, platform, err = self.module.run_command('/usr/bin/uname -i')
        platform_sbin = '/usr/platform/' + platform.rstrip() + '/sbin'

        prtdiag_path = self.module.get_bin_path(
            "prtdiag",
            opt_dirs=[platform_sbin]
        )
        if prtdiag_path is None:
            return dmi_facts

        rc, out, err = self.module.run_command(prtdiag_path)
        # rc returns 1
        if out:
            system_conf = out.split('\n')[0]

            # If you know of any other manufacturers whose names appear in
            # the first line of prtdiag's output, please add them here:
            vendors = [
                "Fujitsu",
                "Oracle Corporation",
                "QEMU",
                "Sun Microsystems",
                "VMware, Inc.",
            ]
            vendor_regexp = "|".join(map(re.escape, vendors))
            system_conf_regexp = (r'System Configuration:\s+' +
                                  r'(' + vendor_regexp + r')\s+' +
                                  r'(?:sun\w+\s+)?' +
                                  r'(.+)')

            found = re.match(system_conf_regexp, system_conf)
            if found:
                dmi_facts['system_vendor'] = found.group(1)
                dmi_facts['product_name'] = found.group(2)

        return dmi_facts

    def get_device_facts(self):
        # Device facts are derived for sdderr kstats. This code does not use the
        # full output, but rather queries for specific stats.
        # Example output:
        # sderr:0:sd0,err:Hard Errors     0
        # sderr:0:sd0,err:Illegal Request 6
        # sderr:0:sd0,err:Media Error     0
        # sderr:0:sd0,err:Predictive Failure Analysis     0
        # sderr:0:sd0,err:Product VBOX HARDDISK   9
        # sderr:0:sd0,err:Revision        1.0
        # sderr:0:sd0,err:Serial No       VB0ad2ec4d-074a
        # sderr:0:sd0,err:Size    53687091200
        # sderr:0:sd0,err:Soft Errors     0
        # sderr:0:sd0,err:Transport Errors        0
        # sderr:0:sd0,err:Vendor  ATA

        device_facts = {}
        device_facts['devices'] = {}

        disk_stats = {
            'Product': 'product',
            'Revision': 'revision',
            'Serial No': 'serial',
            'Size': 'size',
            'Vendor': 'vendor',
            'Hard Errors': 'hard_errors',
            'Soft Errors': 'soft_errors',
            'Transport Errors': 'transport_errors',
            'Media Error': 'media_errors',
            'Predictive Failure Analysis': 'predictive_failure_analysis',
            'Illegal Request': 'illegal_request',
        }

        cmd = ['/usr/bin/kstat', '-p']

        for ds in disk_stats:
            cmd.append('sderr:::%s' % ds)

        d = {}
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            return device_facts

        sd_instances = frozenset(line.split(':')[1] for line in out.split('\n') if line.startswith('sderr'))
        for instance in sd_instances:
            lines = (line for line in out.split('\n') if ':' in line and line.split(':')[1] == instance)
            for line in lines:
                text, value = line.split('\t')
                stat = text.split(':')[3]
                if stat == 'Size':
                    d[disk_stats.get(stat)] = bytes_to_human(float(value))
                else:
                    d[disk_stats.get(stat)] = value.rstrip()

            diskname = 'sd' + instance
            device_facts['devices'][diskname] = d
            d = {}

        return device_facts

    def get_uptime_facts(self):
        uptime_facts = {}
        # sample kstat output:
        # unix:0:system_misc:boot_time    1548249689
        rc, out, err = self.module.run_command('/usr/bin/kstat -p unix:0:system_misc:boot_time')

        if rc != 0:
            return

        # uptime = $current_time - $boot_time
        uptime_facts['uptime_seconds'] = int(time.time() - int(out.split('\t')[1]))

        return uptime_facts


class SunOSHardwareCollector(HardwareCollector):
    _fact_class = SunOSHardware
    _platform = 'SunOS'

    required_facts = set(['platform'])
10,609
Python
.py
234
34.367521
108
0.572384
ansible/ansible
62,258
23,791
861
GPL-3.0
9/5/2024, 5:11:58 PM (Europe/Amsterdam)