Dataset schema (per-column type and observed range or number of distinct values):

| column | type | range / values |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
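The rows below are easier to consume programmatically than in flattened form. As a minimal sketch (assuming the dump comes from a Hugging Face dataset loadable with the `datasets` library; the dataset id used here is a placeholder, not the real one), the schema above maps directly onto a filter:

```python
# Minimal sketch, assuming the Hugging Face `datasets` library is available.
# The dataset id below is a hypothetical placeholder for illustration.
from datasets import load_dataset

ds = load_dataset("example-org/github-python-files", split="train")  # hypothetical id

# Select rows like the ones shown in this section: Ansible unit tests
# from a well-starred repository.
subset = ds.filter(
    lambda row: row["repo_name"] == "ansible/ansible"
    and row["repo_stars"] >= 100
    and row["file_path"].startswith("ansible_ansible/test/units/")
)
print(subset[0]["file_name"], subset[0]["total_lines"])
```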
id: 14,000 | file_name: test_winrm.py | file_path: ansible_ansible/test/units/plugins/connection/test_winrm.py | content:

```python
# -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <jborean@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import typing as t
import pytest
from io import StringIO
from unittest.mock import MagicMock
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.module_utils.common.text.converters import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
from ansible.plugins.connection import winrm
pytest.importorskip("winrm")
class TestConnectionWinRM(object):
OPTIONS_DATA: tuple[tuple[dict[str, t.Any], dict[str, t.Any], dict[str, t.Any], bool], ...] = (
# default options
(
{'_extras': {}},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_connection_timeout': None,
'_winrm_host': 'inventory_hostname',
'_winrm_kwargs': {'username': None, 'password': None},
'_winrm_pass': None,
'_winrm_path': '/wsman',
'_winrm_port': 5986,
'_winrm_scheme': 'https',
'_winrm_transport': ['ssl'],
'_winrm_user': None
},
False
),
# http through port
(
{'_extras': {}, 'ansible_port': 5985},
{},
{
'_winrm_kwargs': {'username': None, 'password': None},
'_winrm_port': 5985,
'_winrm_scheme': 'http',
'_winrm_transport': ['plaintext'],
},
False
),
# kerberos user with kerb present
(
{'_extras': {}, 'ansible_user': 'user@domain.com'},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': 'user@domain.com',
'password': None},
'_winrm_pass': None,
'_winrm_transport': ['kerberos', 'ssl'],
'_winrm_user': 'user@domain.com'
},
True
),
# kerberos user without kerb present
(
{'_extras': {}, 'ansible_user': 'user@domain.com'},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': 'user@domain.com',
'password': None},
'_winrm_pass': None,
'_winrm_transport': ['ssl'],
'_winrm_user': 'user@domain.com'
},
False
),
# kerberos user with managed ticket (implicit)
(
{'_extras': {}, 'ansible_user': 'user@domain.com'},
{'remote_password': 'pass'},
{
'_kerb_managed': True,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': 'user@domain.com',
'password': 'pass'},
'_winrm_pass': 'pass',
'_winrm_transport': ['kerberos', 'ssl'],
'_winrm_user': 'user@domain.com'
},
True
),
# kerb with managed ticket (explicit)
(
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_kinit_mode': 'managed'},
{'password': 'pass'},
{
'_kerb_managed': True,
},
True
),
        # kerb with unmanaged ticket (explicit)
(
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_kinit_mode': 'manual'},
{'password': 'pass'},
{
'_kerb_managed': False,
},
True
),
# transport override (single)
(
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_transport': 'ntlm'},
{},
{
'_winrm_kwargs': {'username': 'user@domain.com',
'password': None},
'_winrm_pass': None,
'_winrm_transport': ['ntlm'],
},
False
),
# transport override (list)
(
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_transport': ['ntlm', 'certificate']},
{},
{
'_winrm_kwargs': {'username': 'user@domain.com',
'password': None},
'_winrm_pass': None,
'_winrm_transport': ['ntlm', 'certificate'],
},
False
),
# winrm extras
(
{'_extras': {'ansible_winrm_server_cert_validation': 'ignore',
'ansible_winrm_service': 'WSMAN'}},
{},
{
'_winrm_kwargs': {'username': None, 'password': None,
'server_cert_validation': 'ignore',
'service': 'WSMAN'},
},
False
),
# direct override
(
{'_extras': {}, 'ansible_winrm_connection_timeout': 5},
{'connection_timeout': 10},
{
'_winrm_connection_timeout': 10,
},
False
),
# password as ansible_password
(
{'_extras': {}, 'ansible_password': 'pass'},
{},
{
'_winrm_pass': 'pass',
'_winrm_kwargs': {'username': None, 'password': 'pass'}
},
False
),
# password as ansible_winrm_pass
(
{'_extras': {}, 'ansible_winrm_pass': 'pass'},
{},
{
'_winrm_pass': 'pass',
'_winrm_kwargs': {'username': None, 'password': 'pass'}
},
False
),
# password as ansible_winrm_password
(
{'_extras': {}, 'ansible_winrm_password': 'pass'},
{},
{
'_winrm_pass': 'pass',
'_winrm_kwargs': {'username': None, 'password': 'pass'}
},
False
),
)
@pytest.mark.parametrize('options, direct, expected, kerb',
((o, d, e, k) for o, d, e, k in OPTIONS_DATA))
def test_set_options(self, options, direct, expected, kerb):
winrm.HAVE_KERBEROS = kerb
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options, direct=direct)
conn._build_winrm_kwargs()
        for attr, expected_value in expected.items():
            actual = getattr(conn, attr)
            assert actual == expected_value, \
                "winrm attr '%s', actual '%s' != expected '%s'" \
                % (attr, actual, expected_value)
class TestWinRMKerbAuth(object):
@pytest.mark.parametrize('options, expected', [
[{"_extras": {}},
(["kinit", "user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
(["kinit2", "user@domain"],)],
[{"_extras": {'ansible_winrm_kerberos_delegation': True}},
(["kinit", "-f", "user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_args': '-f -p'},
(["kinit", "-f", "-p", "user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kerberos_delegation': True, 'ansible_winrm_kinit_args': '-p'},
(["kinit", "-p", "user@domain"],)]
])
def test_kinit_success_subprocess(self, monkeypatch, options, expected):
def mock_communicate(input=None, timeout=None):
return b"", b""
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 0
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
conn._kerb_auth("user@domain", "pass")
mock_calls = mock_popen.mock_calls
assert len(mock_calls) == 1
assert mock_calls[0][1] == expected
actual_env = mock_calls[0][2]['env']
assert sorted(list(actual_env.keys())) == ['KRB5CCNAME', 'PATH']
assert actual_env['KRB5CCNAME'].startswith("FILE:/")
assert actual_env['PATH'] == os.environ['PATH']
@pytest.mark.parametrize('options, expected', [
[{"_extras": {}},
("kinit", ["user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
("kinit2", ["user@domain"],)],
[{"_extras": {'ansible_winrm_kerberos_delegation': True}},
("kinit", ["-f", "user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_args': '-f -p'},
("kinit", ["-f", "-p", "user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kerberos_delegation': True, 'ansible_winrm_kinit_args': '-p'},
("kinit", ["-p", "user@domain"],)]
])
def test_kinit_success_pexpect(self, monkeypatch, options, expected):
pytest.importorskip("pexpect")
mock_pexpect = MagicMock()
mock_pexpect.return_value.exitstatus = 0
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
conn._kerb_auth("user@domain", "pass")
mock_calls = mock_pexpect.mock_calls
assert mock_calls[0][1] == expected
actual_env = mock_calls[0][2]['env']
assert sorted(list(actual_env.keys())) == ['KRB5CCNAME', 'PATH']
assert actual_env['KRB5CCNAME'].startswith("FILE:/")
assert actual_env['PATH'] == os.environ['PATH']
assert mock_calls[0][2]['echo'] is False
assert mock_calls[1][0] == "().expect"
assert mock_calls[1][1] == (".*:",)
assert mock_calls[2][0] == "().sendline"
assert mock_calls[2][1] == ("pass",)
assert mock_calls[3][0] == "().read"
assert mock_calls[4][0] == "().wait"
def test_kinit_with_missing_executable_subprocess(self, monkeypatch):
expected_err = "[Errno 2] No such file or directory: " \
"'/fake/kinit': '/fake/kinit'"
mock_popen = MagicMock(side_effect=OSError(expected_err))
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("user@domain", "pass")
assert str(err.value) == "Kerberos auth failure when calling " \
"kinit cmd '/fake/kinit': %s" % expected_err
def test_kinit_with_missing_executable_pexpect(self, monkeypatch):
pexpect = pytest.importorskip("pexpect")
expected_err = "The command was not found or was not " \
"executable: /fake/kinit"
mock_pexpect = \
MagicMock(side_effect=pexpect.ExceptionPexpect(expected_err))
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
conn.set_options(var_options=options)
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("user@domain", "pass")
assert str(err.value) == "Kerberos auth failure when calling " \
"kinit cmd '/fake/kinit': %s" % expected_err
def test_kinit_error_subprocess(self, monkeypatch):
expected_err = "kinit: krb5_parse_name: " \
"Configuration file does not specify default realm"
def mock_communicate(input=None, timeout=None):
return b"", to_bytes(expected_err)
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 1
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("invaliduser", "pass")
assert str(err.value) == \
"Kerberos auth failure for principal invaliduser with " \
"subprocess: %s" % (expected_err)
def test_kinit_error_pexpect(self, monkeypatch):
pytest.importorskip("pexpect")
expected_err = "Configuration file does not specify default realm"
mock_pexpect = MagicMock()
mock_pexpect.return_value.expect = MagicMock(side_effect=OSError)
mock_pexpect.return_value.read.return_value = to_bytes(expected_err)
mock_pexpect.return_value.exitstatus = 1
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("invaliduser", "pass")
assert str(err.value) == \
"Kerberos auth failure for principal invaliduser with " \
"pexpect: %s" % (expected_err)
def test_kinit_error_pass_in_output_subprocess(self, monkeypatch):
def mock_communicate(input=None, timeout=None):
return b"", b"Error with kinit\n" + input
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 1
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("username", "password")
assert str(err.value) == \
"Kerberos auth failure for principal username with subprocess: " \
"Error with kinit\n<redacted>"
def test_kinit_error_pass_in_output_pexpect(self, monkeypatch):
pytest.importorskip("pexpect")
mock_pexpect = MagicMock()
mock_pexpect.return_value.expect = MagicMock()
mock_pexpect.return_value.read.return_value = \
b"Error with kinit\npassword\n"
mock_pexpect.return_value.exitstatus = 1
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
        pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("username", "password")
assert str(err.value) == \
"Kerberos auth failure for principal username with pexpect: " \
"Error with kinit\n<redacted>"
def test_exec_command_with_timeout(self, monkeypatch):
requests_exc = pytest.importorskip("requests.exceptions")
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
mock_proto = MagicMock()
mock_proto.run_command.side_effect = requests_exc.Timeout("msg")
conn._connected = True
conn._winrm_host = 'hostname'
monkeypatch.setattr(conn, "_winrm_connect", lambda: mock_proto)
with pytest.raises(AnsibleConnectionFailure) as e:
conn.exec_command('cmd', in_data=None, sudoable=True)
assert str(e.value) == "winrm connection error: msg"
def test_exec_command_get_output_timeout(self, monkeypatch):
requests_exc = pytest.importorskip("requests.exceptions")
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
mock_proto = MagicMock()
mock_proto.run_command.return_value = "command_id"
mock_proto.send_message.side_effect = requests_exc.Timeout("msg")
conn._connected = True
conn._winrm_host = 'hostname'
monkeypatch.setattr(conn, "_winrm_connect", lambda: mock_proto)
with pytest.raises(AnsibleConnectionFailure) as e:
conn.exec_command('cmd', in_data=None, sudoable=True)
assert str(e.value) == "winrm connection error: msg"
def test_connect_failure_auth_401(self, monkeypatch):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"ansible_winrm_transport": "basic", "_extras": {}})
mock_proto = MagicMock()
mock_proto.open_shell.side_effect = ValueError("Custom exc Code 401")
mock_proto_init = MagicMock()
mock_proto_init.return_value = mock_proto
monkeypatch.setattr(winrm, "Protocol", mock_proto_init)
with pytest.raises(AnsibleConnectionFailure, match="the specified credentials were rejected by the server"):
conn.exec_command('cmd', in_data=None, sudoable=True)
def test_connect_failure_other_exception(self, monkeypatch):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"ansible_winrm_transport": "basic", "_extras": {}})
mock_proto = MagicMock()
mock_proto.open_shell.side_effect = ValueError("Custom exc")
mock_proto_init = MagicMock()
mock_proto_init.return_value = mock_proto
monkeypatch.setattr(winrm, "Protocol", mock_proto_init)
with pytest.raises(AnsibleConnectionFailure, match="basic: Custom exc"):
conn.exec_command('cmd', in_data=None, sudoable=True)
def test_connect_failure_operation_timed_out(self, monkeypatch):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"ansible_winrm_transport": "basic", "_extras": {}})
mock_proto = MagicMock()
mock_proto.open_shell.side_effect = ValueError("Custom exc Operation timed out")
mock_proto_init = MagicMock()
mock_proto_init.return_value = mock_proto
monkeypatch.setattr(winrm, "Protocol", mock_proto_init)
with pytest.raises(AnsibleError, match="the connection attempt timed out"):
conn.exec_command('cmd', in_data=None, sudoable=True)
def test_connect_no_transport(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
conn._build_winrm_kwargs()
conn._winrm_transport = []
with pytest.raises(AnsibleError, match="No transport found for WinRM connection"):
conn._winrm_connect()
```

size: 20,215 | language: Python | extension: .py | total_lines: 459 | avg_line_length: 32.867102 | max_line_length: 116 | alphanum_fraction: 0.5569 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
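The `_kerb_auth` tests above pin down a specific contract: `kinit` runs with an environment containing only a private `FILE:` credential cache in `KRB5CCNAME` plus the inherited `PATH`, the password is fed in on stdin, and a non-zero exit code becomes a connection failure. A standalone sketch of that pattern (an illustration of the contract the tests assert, not Ansible's implementation):

```python
# Sketch of the pattern the kinit tests above assert on: spawn kinit with a
# private credential cache and a minimal environment, feeding the password
# on stdin. Standalone illustration, not Ansible's implementation.
import os
import subprocess
import tempfile

def kinit_with_private_ccache(principal: str, password: str) -> str:
    """Obtain a TGT into a throwaway FILE: cache and return its KRB5CCNAME."""
    ccache = tempfile.NamedTemporaryFile(prefix="krb5cc_", delete=False)
    env = {"KRB5CCNAME": "FILE:%s" % ccache.name, "PATH": os.environ["PATH"]}
    proc = subprocess.Popen(
        ["kinit", principal],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        env=env,
    )
    stdout, stderr = proc.communicate(input=password.encode() + b"\n")
    if proc.returncode != 0:
        raise RuntimeError("kinit failed: %s" % stderr.decode(errors="replace"))
    return env["KRB5CCNAME"]
```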

id: 14,001 | file_name: test_local.py | file_path: ansible_ansible/test/units/plugins/connection/test_local.py | content:

```python
#
# (c) 2020 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from io import StringIO
import unittest
from ansible.plugins.connection import local
from ansible.playbook.play_context import PlayContext
class TestLocalConnectionClass(unittest.TestCase):
def test_local_connection_module(self):
play_context = PlayContext()
play_context.prompt = (
'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
)
in_stream = StringIO()
self.assertIsInstance(local.Connection(play_context, in_stream), local.Connection)
```

size: 1,239 | language: Python | extension: .py | total_lines: 30 | avg_line_length: 38.2 | max_line_length: 90 | alphanum_fraction: 0.762063 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,002 | file_name: test_powershell.py | file_path: ansible_ansible/test/units/plugins/shell/test_powershell.py | content:

```python
from __future__ import annotations
import pytest
from ansible.plugins.shell.powershell import _parse_clixml, ShellModule
def test_parse_clixml_empty():
empty = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04"></Objs>'
expected = b''
actual = _parse_clixml(empty)
assert actual == expected
def test_parse_clixml_with_progress():
progress = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
b'<Obj S="progress" RefId="0"><TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>' \
b'<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj></Objs>'
expected = b''
actual = _parse_clixml(progress)
assert actual == expected
def test_parse_clixml_single_stream():
single_stream = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
b'<S S="Error">fake : The term \'fake\' is not recognized as the name of a cmdlet. Check _x000D__x000A_</S>' \
b'<S S="Error">the spelling of the name, or if a path was included._x000D__x000A_</S>' \
b'<S S="Error">At line:1 char:1_x000D__x000A_</S>' \
b'<S S="Error">+ fake cmdlet_x000D__x000A_</S><S S="Error">+ ~~~~_x000D__x000A_</S>' \
b'<S S="Error"> + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_</S>' \
b'<S S="Error"> + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_</S>' \
b'<S S="Error"> _x000D__x000A_</S>' \
b'</Objs>'
expected = b"fake : The term 'fake' is not recognized as the name of a cmdlet. Check \r\n" \
b"the spelling of the name, or if a path was included.\r\n" \
b"At line:1 char:1\r\n" \
b"+ fake cmdlet\r\n" \
b"+ ~~~~\r\n" \
b" + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException\r\n" \
b" + FullyQualifiedErrorId : CommandNotFoundException\r\n" \
b" \r\n"
actual = _parse_clixml(single_stream)
assert actual == expected
def test_parse_clixml_multiple_streams():
multiple_stream = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
b'<S S="Error">fake : The term \'fake\' is not recognized as the name of a cmdlet. Check _x000D__x000A_</S>' \
b'<S S="Error">the spelling of the name, or if a path was included._x000D__x000A_</S>' \
b'<S S="Error">At line:1 char:1_x000D__x000A_</S>' \
b'<S S="Error">+ fake cmdlet_x000D__x000A_</S><S S="Error">+ ~~~~_x000D__x000A_</S>' \
b'<S S="Error"> + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_</S>' \
b'<S S="Error"> + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_</S><S S="Error"> _x000D__x000A_</S>' \
b'<S S="Info">hi info</S>' \
b'<S S="Info">other</S>' \
b'</Objs>'
expected = b"hi infoother"
actual = _parse_clixml(multiple_stream, stream="Info")
assert actual == expected
def test_parse_clixml_multiple_elements():
multiple_elements = b'#< CLIXML\r\n#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
b'<Obj S="progress" RefId="0"><TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>' \
b'<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>' \
b'<S S="Error">Error 1</S></Objs>' \
b'<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04"><Obj S="progress" RefId="0">' \
b'<TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>' \
b'<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>' \
b'<Obj S="progress" RefId="1"><TNRef RefId="0" /><MS><I64 N="SourceId">2</I64>' \
b'<PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>' \
b'<S S="Error">Error 2</S></Objs>'
expected = b"Error 1\r\nError 2"
actual = _parse_clixml(multiple_elements)
assert actual == expected
@pytest.mark.parametrize('clixml, expected', [
('', ''),
('just newline _x000A_', 'just newline \n'),
('surrogate pair _xD83C__xDFB5_', 'surrogate pair 🎵'),
('null char _x0000_', 'null char \0'),
('normal char _x0061_', 'normal char a'),
('escaped literal _x005F_x005F_', 'escaped literal _x005F_'),
    ('underscore before escape _x005F__x000A_', 'underscore before escape _\n'),
('surrogate high _xD83C_', 'surrogate high \uD83C'),
('surrogate low _xDFB5_', 'surrogate low \uDFB5'),
('lower case hex _x005f_', 'lower case hex _'),
('invalid hex _x005G_', 'invalid hex _x005G_'),
])
def test_parse_clixml_with_complex_escaped_chars(clixml, expected):
clixml_data = (
        '#< CLIXML\r\n'
'<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
f'<S S="Error">{clixml}</S>'
'</Objs>'
).encode()
b_expected = expected.encode(errors="surrogatepass")
actual = _parse_clixml(clixml_data)
assert actual == b_expected
def test_join_path_unc():
pwsh = ShellModule()
unc_path_parts = ['\\\\host\\share\\dir1\\\\dir2\\', '\\dir3/dir4', 'dir5', 'dir6\\']
expected = '\\\\host\\share\\dir1\\dir2\\dir3\\dir4\\dir5\\dir6'
actual = pwsh.join_path(*unc_path_parts)
assert actual == expected
```

size: 6,517 | language: Python | extension: .py | total_lines: 96 | avg_line_length: 56.375 | max_line_length: 150 | alphanum_fraction: 0.561836 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
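The escape cases in the last parametrized test pin down the CLIXML encoding rules: each `_xHHHH_` token (hex is case-insensitive) encodes one UTF-16 code unit, adjacent high/low surrogates combine into a single character, lone surrogates survive, and malformed tokens pass through untouched. A sketch of a decoder satisfying those rules (Ansible's real one lives in `ansible.plugins.shell.powershell._parse_clixml`):

```python
# Sketch of the CLIXML "_xHHHH_" escape scheme exercised above. Round-tripping
# through UTF-16 with "surrogatepass" lets _xD83C__xDFB5_ resolve to one
# character while a lone _xD83C_ stays a lone surrogate. Illustration only.
import re

def decode_clixml_escapes(text: str) -> str:
    # Replace each _xHHHH_ token with the UTF-16 code unit it names;
    # tokens with invalid hex simply do not match and are left as-is.
    units = re.sub(
        r"_x([0-9A-Fa-f]{4})_",
        lambda m: chr(int(m.group(1), 16)),
        text,
    )
    return units.encode("utf-16", "surrogatepass").decode("utf-16", "surrogatepass")

print(decode_clixml_escapes("surrogate pair _xD83C__xDFB5_"))  # surrogate pair 🎵
```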

id: 14,003 | file_name: test_cmd.py | file_path: ansible_ansible/test/units/plugins/shell/test_cmd.py | content:

```python
from __future__ import annotations
import pytest
from ansible.plugins.shell.cmd import ShellModule
@pytest.mark.parametrize('s, expected', [
['arg1', 'arg1'],
[None, '""'],
['arg1 and 2', '^"arg1 and 2^"'],
['malicious argument\\"&whoami', '^"malicious argument\\\\^"^&whoami^"'],
['C:\\temp\\some ^%file% > nul', '^"C:\\temp\\some ^^^%file^% ^> nul^"']
])
def test_quote_args(s, expected):
cmd = ShellModule()
actual = cmd.quote(s)
assert actual == expected
```

size: 495 | language: Python | extension: .py | total_lines: 14 | avg_line_length: 31.785714 | max_line_length: 77 | alphanum_fraction: 0.612159 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
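The parametrize table above doubles as usage documentation: `ShellModule.quote` leaves bare tokens alone, wraps anything containing spaces in `^"..."^"` quoting, and caret-escapes cmd.exe metacharacters such as `"`, `&`, `%`, `^` and `>`. Reproducing the cases interactively:

```python
# Interactive check of the quoting rules asserted above; the expected
# outputs in the comments are taken straight from the parametrize table.
from ansible.plugins.shell.cmd import ShellModule

cmd = ShellModule()
print(cmd.quote('arg1'))                          # arg1 (no quoting needed)
print(cmd.quote('arg1 and 2'))                    # ^"arg1 and 2^"
print(cmd.quote('C:\\temp\\some ^%file% > nul'))  # ^"C:\temp\some ^^^%file^% ^> nul^"
```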

id: 14,004 | file_name: test_ini.py | file_path: ansible_ansible/test/units/plugins/lookup/test_ini.py | content:

```python
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import unittest
from ansible.plugins.lookup.ini import _parse_params
class TestINILookup(unittest.TestCase):
    # Currently there isn't a new-style syntax; all of these are old-style params
old_style_params_data = (
# Simple case
dict(
term=u'keyA section=sectionA file=/path/to/file',
expected=[u'file=/path/to/file', u'keyA', u'section=sectionA'],
),
dict(
term=u'keyB section=sectionB with space file=/path/with/embedded spaces and/file',
expected=[u'file=/path/with/embedded spaces and/file', u'keyB', u'section=sectionB with space'],
),
dict(
term=u'keyC section=sectionC file=/path/with/equals/cn=com.ansible',
expected=[u'file=/path/with/equals/cn=com.ansible', u'keyC', u'section=sectionC'],
),
dict(
term=u'keyD section=sectionD file=/path/with space and/equals/cn=com.ansible',
expected=[u'file=/path/with space and/equals/cn=com.ansible', u'keyD', u'section=sectionD'],
),
dict(
term=u'keyE section=sectionE file=/path/with/unicode/くらとみ/file',
expected=[u'file=/path/with/unicode/くらとみ/file', u'keyE', u'section=sectionE'],
),
dict(
term=u'keyF section=sectionF file=/path/with/utf 8 and spaces/くらとみ/file',
expected=[u'file=/path/with/utf 8 and spaces/くらとみ/file', u'keyF', u'section=sectionF'],
),
)
def test_parse_parameters(self):
pvals = {'file': '', 'section': '', 'key': '', 'type': '', 're': '', 'default': '', 'encoding': ''}
for testcase in self.old_style_params_data:
# print(testcase)
params = _parse_params(testcase['term'], pvals)
params.sort()
self.assertEqual(params, testcase['expected'])
```

size: 2,600 | language: Python | extension: .py | total_lines: 56 | avg_line_length: 38.892857 | max_line_length: 108 | alphanum_fraction: 0.650439 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
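`_parse_params` is what lets old-style terms carry spaces and `=` inside values: tokens that are not recognised `key=value` parameters are folded back into the preceding parameter, so `section=sectionB with space` survives as one value. A quick check against the first test case above:

```python
# Quick check of _parse_params against the first test case above. The pvals
# dict supplies the recognised parameter names; anything else is treated as
# the lookup key or folded into the preceding key=value token.
from ansible.plugins.lookup.ini import _parse_params

pvals = {'file': '', 'section': '', 'key': '', 'type': '', 're': '', 'default': '', 'encoding': ''}
params = _parse_params(u'keyA section=sectionA file=/path/to/file', pvals)
print(sorted(params))  # ['file=/path/to/file', 'keyA', 'section=sectionA']
```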

id: 14,005 | file_name: test_env.py | file_path: ansible_ansible/test/units/plugins/lookup/test_env.py | content:

```python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Abhay Kadam <abhaykadam88@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import pytest
from ansible.plugins.loader import lookup_loader
@pytest.mark.parametrize('env_var,exp_value', [
('foo', 'bar'),
('equation', 'a=b*100')
])
def test_env_var_value(monkeypatch, env_var, exp_value):
monkeypatch.setattr('os.environ.get', lambda x, y: exp_value)
env_lookup = lookup_loader.get('env')
retval = env_lookup.run([env_var], None)
assert retval == [exp_value]
@pytest.mark.parametrize('env_var,exp_value', [
('simple_var', 'alpha-β-gamma'),
('the_var', 'ãnˈsiβle')
])
def test_utf8_env_var_value(monkeypatch, env_var, exp_value):
monkeypatch.setattr('os.environ.get', lambda x, y: exp_value)
env_lookup = lookup_loader.get('env')
retval = env_lookup.run([env_var], None)
assert retval == [exp_value]
```

size: 994 | language: Python | extension: .py | total_lines: 24 | avg_line_length: 37.875 | max_line_length: 92 | alphanum_fraction: 0.687565 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
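The monkeypatch on `os.environ.get` keeps these tests hermetic; against the live process environment the lookup is driven the same way (a quick sketch, using a demo variable set for the occasion):

```python
# Quick sketch: drive the env lookup against the real process environment,
# mirroring how the tests above call run(). DEMO_VAR is just an example.
import os

from ansible.plugins.loader import lookup_loader

os.environ['DEMO_VAR'] = 'bar'
env_lookup = lookup_loader.get('env')
print(env_lookup.run(['DEMO_VAR'], None))  # ['bar']
```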

id: 14,006 | file_name: test_url.py | file_path: ansible_ansible/test/units/plugins/lookup/test_url.py | content:

```python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Sam Doran <sdoran@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import pytest
from ansible.plugins.loader import lookup_loader
@pytest.mark.parametrize(
('kwargs', 'agent'),
(
({}, 'ansible-httpget'),
({'http_agent': 'SuperFox'}, 'SuperFox'),
)
)
def test_user_agent(mocker, kwargs, agent):
mock_open_url = mocker.patch('ansible.plugins.lookup.url.open_url', side_effect=AttributeError('raised intentionally'))
url_lookup = lookup_loader.get('url')
with pytest.raises(AttributeError):
url_lookup.run(['https://nourl'], **kwargs)
assert 'http_agent' in mock_open_url.call_args.kwargs
assert mock_open_url.call_args.kwargs['http_agent'] == agent
```

size: 846 | language: Python | extension: .py | total_lines: 20 | avg_line_length: 38.25 | max_line_length: 123 | alphanum_fraction: 0.690621 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,007 | file_name: test_password.py | file_path: ansible_ansible/test/units/plugins/lookup/test_password.py | content:

```python
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
try:
import passlib
from passlib.handlers import pbkdf2
except ImportError: # pragma: nocover
passlib = None
pbkdf2 = None
import pytest
from units.mock.loader import DictDataLoader
import unittest
from unittest.mock import mock_open, patch
from ansible.errors import AnsibleError
import builtins
from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins.loader import PluginLoader, lookup_loader
from ansible.plugins.lookup import password
DEFAULT_LENGTH = 20
DEFAULT_CHARS = sorted([u'ascii_letters', u'digits', u".,:-_"])
DEFAULT_CANDIDATE_CHARS = u'.,:-_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
# Currently there isn't a new-style syntax; all of these are old-style params
old_style_params_data = (
# Simple case
dict(
term=u'/path/to/file',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
# Special characters in path
dict(
term=u'/path/with/embedded spaces and/file',
filename=u'/path/with/embedded spaces and/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
dict(
term=u'/path/with/equals/cn=com.ansible',
filename=u'/path/with/equals/cn=com.ansible',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
dict(
term=u'/path/with/unicode/くらとみ/file',
filename=u'/path/with/unicode/くらとみ/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
# Mix several special chars
dict(
term=u'/path/with/utf 8 and spaces/くらとみ/file',
filename=u'/path/with/utf 8 and spaces/くらとみ/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
dict(
term=u'/path/with/encoding=unicode/くらとみ/file',
filename=u'/path/with/encoding=unicode/くらとみ/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
dict(
term=u'/path/with/encoding=unicode/くらとみ/and spaces file',
filename=u'/path/with/encoding=unicode/くらとみ/and spaces file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
# Simple parameters
dict(
term=u'/path/to/file length=42',
filename=u'/path/to/file',
params=dict(length=42, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
dict(
term=u'/path/to/file encrypt=pbkdf2_sha256',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt='pbkdf2_sha256', ident=None, chars=DEFAULT_CHARS, seed=None),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
dict(
term=u'/path/to/file chars=abcdefghijklmnop',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=[u'abcdefghijklmnop'], seed=None),
candidate_chars=u'abcdefghijklmnop',
),
dict(
term=u'/path/to/file chars=digits,abc,def',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=sorted([u'digits', u'abc', u'def']), seed=None),
candidate_chars=u'abcdef0123456789',
),
dict(
term=u'/path/to/file seed=1',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=DEFAULT_CHARS, seed='1'),
candidate_chars=DEFAULT_CANDIDATE_CHARS,
),
# Including comma in chars
dict(
term=u'/path/to/file chars=abcdefghijklmnop,,digits',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=sorted([u'abcdefghijklmnop', u',', u'digits']), seed=None),
candidate_chars=u',abcdefghijklmnop0123456789',
),
dict(
term=u'/path/to/file chars=,,',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=[u','], seed=None),
candidate_chars=u',',
),
# Including = in chars
dict(
term=u'/path/to/file chars=digits,=,,',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=sorted([u'digits', u'=', u',']), seed=None),
candidate_chars=u',=0123456789',
),
dict(
term=u'/path/to/file chars=digits,abc=def',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=sorted([u'digits', u'abc=def']), seed=None),
candidate_chars=u'abc=def0123456789',
),
# Including unicode in chars
dict(
term=u'/path/to/file chars=digits,くらとみ,,',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=sorted([u'digits', u'くらとみ', u',']), seed=None),
candidate_chars=u',0123456789くらとみ',
),
# Including only unicode in chars
dict(
term=u'/path/to/file chars=くらとみ',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=sorted([u'くらとみ']), seed=None),
candidate_chars=u'くらとみ',
),
# Include ':' in path
dict(
term=u'/path/to/file_with:colon chars=ascii_letters,digits',
filename=u'/path/to/file_with:colon',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None,
chars=sorted([u'ascii_letters', u'digits']), seed=None),
candidate_chars=u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
),
# Including special chars in both path and chars
# Special characters in path
dict(
term=u'/path/with/embedded spaces and/file chars=abc=def',
filename=u'/path/with/embedded spaces and/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=[u'abc=def'], seed=None),
candidate_chars=u'abc=def',
),
dict(
term=u'/path/with/equals/cn=com.ansible chars=abc=def',
filename=u'/path/with/equals/cn=com.ansible',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=[u'abc=def'], seed=None),
candidate_chars=u'abc=def',
),
dict(
term=u'/path/with/unicode/くらとみ/file chars=くらとみ',
filename=u'/path/with/unicode/くらとみ/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, ident=None, chars=[u'くらとみ'], seed=None),
candidate_chars=u'くらとみ',
),
)
class TestParseParameters(unittest.TestCase):
def setUp(self):
self.fake_loader = DictDataLoader({'/path/to/somewhere': 'sdfsdf'})
self.password_lookup = lookup_loader.get('password')
self.password_lookup._loader = self.fake_loader
def test(self):
for testcase in old_style_params_data:
filename, params = self.password_lookup._parse_parameters(testcase['term'])
params['chars'].sort()
self.assertEqual(filename, testcase['filename'])
self.assertEqual(params, testcase['params'])
def test_unrecognized_value(self):
testcase = dict(term=u'/path/to/file chars=くらとみi sdfsdf',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']),
candidate_chars=u'くらとみ')
self.assertRaises(AnsibleError, self.password_lookup._parse_parameters, testcase['term'])
def test_invalid_params(self):
testcase = dict(term=u'/path/to/file chars=くらとみi somethign_invalid=123',
filename=u'/path/to/file',
params=dict(length=DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']),
candidate_chars=u'くらとみ')
self.assertRaises(AnsibleError, self.password_lookup._parse_parameters, testcase['term'])
class TestReadPasswordFile(unittest.TestCase):
def setUp(self):
self.os_path_exists = password.os.path.exists
def tearDown(self):
password.os.path.exists = self.os_path_exists
def test_no_password_file(self):
password.os.path.exists = lambda x: False
self.assertEqual(password._read_password_file(b'/nonexistent'), None)
def test_with_password_file(self):
password.os.path.exists = lambda x: True
with patch.object(builtins, 'open', mock_open(read_data=b'Testing\n')) as m:
self.assertEqual(password._read_password_file(b'/etc/motd'), u'Testing')
class TestGenCandidateChars(unittest.TestCase):
def _assert_gen_candidate_chars(self, testcase):
expected_candidate_chars = testcase['candidate_chars']
params = testcase['params']
chars_spec = params['chars']
res = password._gen_candidate_chars(chars_spec)
self.assertEqual(res, expected_candidate_chars)
def test_gen_candidate_chars(self):
for testcase in old_style_params_data:
self._assert_gen_candidate_chars(testcase)
class TestRandomPassword(unittest.TestCase):
def _assert_valid_chars(self, res, chars):
for res_char in res:
self.assertIn(res_char, chars)
def test_default(self):
res = password.random_password()
self.assertEqual(len(res), DEFAULT_LENGTH)
self.assertTrue(isinstance(res, str))
self._assert_valid_chars(res, DEFAULT_CANDIDATE_CHARS)
def test_zero_length(self):
res = password.random_password(length=0)
self.assertEqual(len(res), 0)
self.assertTrue(isinstance(res, str))
self._assert_valid_chars(res, u',')
def test_just_a_common(self):
res = password.random_password(length=1, chars=u',')
self.assertEqual(len(res), 1)
self.assertEqual(res, u',')
def test_free_will(self):
# A Rush and Spinal Tap reference twofer
res = password.random_password(length=11, chars=u'a')
self.assertEqual(len(res), 11)
self.assertEqual(res, 'aaaaaaaaaaa')
self._assert_valid_chars(res, u'a')
def test_unicode(self):
res = password.random_password(length=11, chars=u'くらとみ')
self._assert_valid_chars(res, u'くらとみ')
self.assertEqual(len(res), 11)
def test_seed(self):
pw1 = password.random_password(seed=1)
pw2 = password.random_password(seed=1)
pw3 = password.random_password(seed=2)
self.assertEqual(pw1, pw2)
self.assertNotEqual(pw1, pw3)
def test_gen_password(self):
for testcase in old_style_params_data:
params = testcase['params']
candidate_chars = testcase['candidate_chars']
params_chars_spec = password._gen_candidate_chars(params['chars'])
password_string = password.random_password(length=params['length'],
chars=params_chars_spec)
self.assertEqual(len(password_string),
params['length'],
msg='generated password=%s has length (%s) instead of expected length (%s)' %
(password_string, len(password_string), params['length']))
for char in password_string:
self.assertIn(char, candidate_chars,
                              msg='%s not found in %s from chars spec %s' %
(char, candidate_chars, params['chars']))
class TestParseContent(unittest.TestCase):
def test_empty_password_file(self):
plaintext_password, salt, ident = password._parse_content(u'')
self.assertEqual(plaintext_password, u'')
self.assertEqual(salt, None)
self.assertEqual(ident, None)
def test(self):
expected_content = u'12345678'
file_content = expected_content
plaintext_password, salt, ident = password._parse_content(file_content)
self.assertEqual(plaintext_password, expected_content)
self.assertEqual(salt, None)
self.assertEqual(ident, None)
def test_with_salt(self):
expected_content = u'12345678 salt=87654321'
file_content = expected_content
plaintext_password, salt, ident = password._parse_content(file_content)
self.assertEqual(plaintext_password, u'12345678')
self.assertEqual(salt, u'87654321')
self.assertEqual(ident, None)
def test_with_salt_and_ident(self):
expected_content = u'12345678 salt=87654321 ident=2a'
file_content = expected_content
plaintext_password, salt, ident = password._parse_content(file_content)
self.assertEqual(plaintext_password, u'12345678')
self.assertEqual(salt, u'87654321')
self.assertEqual(ident, u'2a')
class TestFormatContent(unittest.TestCase):
def test_no_encrypt(self):
self.assertEqual(
password._format_content(password=u'hunter42',
salt=u'87654321',
encrypt=False),
u'hunter42 salt=87654321')
def test_no_encrypt_no_salt(self):
self.assertEqual(
password._format_content(password=u'hunter42',
salt=None,
encrypt=None),
u'hunter42')
def test_encrypt(self):
self.assertEqual(
password._format_content(password=u'hunter42',
salt=u'87654321',
encrypt='pbkdf2_sha256'),
u'hunter42 salt=87654321')
def test_encrypt_no_salt(self):
self.assertRaises(AssertionError, password._format_content, u'hunter42', None, 'pbkdf2_sha256')
class TestWritePasswordFile(unittest.TestCase):
def setUp(self):
self.makedirs_safe = password.makedirs_safe
self.os_chmod = password.os.chmod
password.makedirs_safe = self.noop
password.os.chmod = self.noop
def noop(self, *args, **kwargs):
pass
def tearDown(self):
password.makedirs_safe = self.makedirs_safe
password.os.chmod = self.os_chmod
def test_content_written(self):
with patch.object(builtins, 'open', mock_open()) as m:
password._write_password_file(b'/this/is/a/test/caf\xc3\xa9', u'Testing Café')
m.assert_called_once_with(b'/this/is/a/test/caf\xc3\xa9', 'wb')
m().write.assert_called_once_with(u'Testing Café\n'.encode('utf-8'))
class BaseTestLookupModule(unittest.TestCase):
def setUp(self):
self.fake_loader = DictDataLoader({'/path/to/somewhere': 'sdfsdf'})
self.password_lookup = lookup_loader.get('password')
self.password_lookup._loader = self.fake_loader
self.os_path_exists = password.os.path.exists
self.os_open = password.os.open
password.os.open = self.noop
self.os_close = password.os.close
password.os.close = self.noop
self.makedirs_safe = password.makedirs_safe
password.makedirs_safe = self.noop
def noop(self, *args, **kwargs):
pass
def tearDown(self):
password.os.path.exists = self.os_path_exists
password.os.open = self.os_open
password.os.close = self.os_close
password.makedirs_safe = self.makedirs_safe
class TestLookupModuleWithoutPasslib(BaseTestLookupModule):
@patch.object(PluginLoader, '_get_paths')
@patch('ansible.plugins.lookup.password._write_password_file')
    def test_no_encrypt(self, mock_write_file, mock_get_paths):
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
results = self.password_lookup.run([u'/path/to/somewhere'], None)
# FIXME: assert something useful
for result in results:
assert len(result) == DEFAULT_LENGTH
assert isinstance(result, str)
@patch.object(PluginLoader, '_get_paths')
@patch('ansible.plugins.lookup.password._write_password_file')
    def test_password_already_created_no_encrypt(self, mock_write_file, mock_get_paths):
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
for result in results:
self.assertEqual(result, u'hunter42')
@patch.object(PluginLoader, '_get_paths')
@patch('ansible.plugins.lookup.password._write_password_file')
    def test_only_a(self, mock_write_file, mock_get_paths):
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
results = self.password_lookup.run([u'/path/to/somewhere chars=a'], None)
for result in results:
self.assertEqual(result, u'a' * DEFAULT_LENGTH)
@patch('time.sleep')
def test_lock_been_held(self, mock_sleep):
# pretend the lock file is here
password.os.path.exists = lambda x: True
with pytest.raises(AnsibleError):
with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
# should timeout here
self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
def test_lock_not_been_held(self):
# pretend now there is password file but no lock
password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
# should not timeout here
results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
for result in results:
self.assertEqual(result, u'hunter42')
@pytest.mark.skipif(passlib is None, reason='passlib must be installed to run these tests')
class TestLookupModuleWithPasslib(BaseTestLookupModule):
def setUp(self):
super(TestLookupModuleWithPasslib, self).setUp()
# Different releases of passlib default to a different number of rounds
self.sha256 = passlib.registry.get_crypt_handler('pbkdf2_sha256')
sha256_for_tests = pbkdf2.create_pbkdf2_hash("sha256", 32, 20000)
passlib.registry.register_crypt_handler(sha256_for_tests, force=True)
def tearDown(self):
super(TestLookupModuleWithPasslib, self).tearDown()
passlib.registry.register_crypt_handler(self.sha256, force=True)
@patch.object(PluginLoader, '_get_paths')
@patch('ansible.plugins.lookup.password._write_password_file')
    def test_encrypt(self, mock_write_file, mock_get_paths):
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
results = self.password_lookup.run([u'/path/to/somewhere encrypt=pbkdf2_sha256'], None)
# pbkdf2 format plus hash
expected_password_length = 76
for result in results:
self.assertEqual(len(result), expected_password_length)
# result should have 5 parts split by '$'
str_parts = result.split('$', 5)
# verify the result is parseable by the passlib
crypt_parts = passlib.hash.pbkdf2_sha256.parsehash(result)
# verify it used the right algo type
self.assertEqual(str_parts[1], 'pbkdf2-sha256')
self.assertEqual(len(str_parts), 5)
# verify the string and parsehash agree on the number of rounds
self.assertEqual(int(str_parts[2]), crypt_parts['rounds'])
self.assertIsInstance(result, str)
@patch('ansible.plugins.lookup.password._write_password_file')
def test_password_already_created_encrypt(self, mock_write_file):
password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
results = self.password_lookup.run([u'/path/to/somewhere chars=anything encrypt=pbkdf2_sha256'], None)
for result in results:
self.assertEqual(result, u'$pbkdf2-sha256$20000$ODc2NTQzMjE$Uikde0cv0BKaRaAXMrUQB.zvG4GmnjClwjghwIRf2gU')
# Assert the password file is not rewritten
mock_write_file.assert_not_called()
@pytest.mark.skipif(passlib is None, reason='passlib must be installed to run these tests')
class TestLookupModuleWithPasslibWrappedAlgo(BaseTestLookupModule):
def setUp(self):
super(TestLookupModuleWithPasslibWrappedAlgo, self).setUp()
self.os_path_exists = password.os.path.exists
def tearDown(self):
super(TestLookupModuleWithPasslibWrappedAlgo, self).tearDown()
password.os.path.exists = self.os_path_exists
@patch('ansible.plugins.lookup.password._write_password_file')
def test_encrypt_wrapped_crypt_algo(self, mock_write_file):
password.os.path.exists = self.password_lookup._loader.path_exists
with patch.object(builtins, 'open', mock_open(read_data=self.password_lookup._loader._get_file_contents('/path/to/somewhere')[0])) as m:
results = self.password_lookup.run([u'/path/to/somewhere encrypt=ldap_sha256_crypt'], None)
wrapper = getattr(passlib.hash, 'ldap_sha256_crypt')
self.assertEqual(len(results), 1)
result = results[0]
self.assertIsInstance(result, str)
expected_password_length = 76
self.assertEqual(len(result), expected_password_length)
# result should have 5 parts split by '$'
str_parts = result.split('$')
self.assertEqual(len(str_parts), 5)
# verify the string and passlib agree on the number of rounds
self.assertEqual(str_parts[2], "rounds=%s" % wrapper.default_rounds)
# verify it used the right algo type
self.assertEqual(str_parts[0], '{CRYPT}')
# verify it used the right algo type
self.assertTrue(wrapper.verify(self.password_lookup._loader._get_file_contents('/path/to/somewhere')[0], result))
# verify a password with a non default rounds value
# generated with: echo test | mkpasswd -s --rounds 660000 -m sha-256 --salt testansiblepass.
hashpw = '{CRYPT}$5$rounds=660000$testansiblepass.$KlRSdA3iFXoPI.dEwh7AixiXW3EtCkLrlQvlYA2sluD'
self.assertTrue(wrapper.verify('test', hashpw))
```

size: 24,101 | language: Python | extension: .py | total_lines: 480 | avg_line_length: 40.633333 | max_line_length: 144 | alphanum_fraction: 0.655794 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
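`test_encrypt` registers a pbkdf2_sha256 handler pinned to 20,000 rounds because passlib's default round count varies between releases, which would otherwise change the hash length the test asserts on. The fixed-salt expectation in `test_password_already_created_encrypt` can be reproduced directly (a sketch, assuming passlib is installed):

```python
# Sketch (assumes passlib): reproduce the fixed-salt hash asserted in
# test_password_already_created_encrypt. pbkdf2_sha256 takes the salt as
# bytes and the round count as an int via using().
from passlib.hash import pbkdf2_sha256

h = pbkdf2_sha256.using(salt=b"87654321", rounds=20000).hash("hunter42")
print(h)  # $pbkdf2-sha256$20000$ODc2NTQzMjE$Uikde0cv0BKaRaAXMrUQB.zvG4GmnjClwjghwIRf2gU
assert pbkdf2_sha256.verify("hunter42", h)
```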

id: 14,008 | file_name: test_callback.py | file_path: ansible_ansible/test/units/plugins/callback/test_callback.py | content:

```python
# (c) 2012-2014, Chris Meyers <chris.meyers.fsu@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import json
import re
import textwrap
import types
import unittest
from unittest.mock import MagicMock
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.plugins.callback import CallbackBase
mock_task = MagicMock()
mock_task.delegate_to = None
class TestCallback(unittest.TestCase):
# FIXME: This doesn't really test anything...
def test_init(self):
CallbackBase()
def test_display(self):
display_mock = MagicMock()
display_mock.verbosity = 0
cb = CallbackBase(display=display_mock)
self.assertIs(cb._display, display_mock)
def test_display_verbose(self):
display_mock = MagicMock()
display_mock.verbosity = 5
cb = CallbackBase(display=display_mock)
self.assertIs(cb._display, display_mock)
def test_host_label(self):
result = TaskResult(host=Host('host1'), task=mock_task, return_data={})
self.assertEqual(CallbackBase.host_label(result), 'host1')
def test_host_label_delegated(self):
mock_task.delegate_to = 'host2'
result = TaskResult(
host=Host('host1'),
task=mock_task,
return_data={'_ansible_delegated_vars': {'ansible_host': 'host2'}},
)
self.assertEqual(CallbackBase.host_label(result), 'host1 -> host2')
# TODO: import callback module so we can patch callback.cli/callback.C
class TestCallbackResults(unittest.TestCase):
def test_get_item_label(self):
cb = CallbackBase()
results = {'item': 'some_item'}
res = cb._get_item_label(results)
self.assertEqual(res, 'some_item')
def test_get_item_label_no_log(self):
cb = CallbackBase()
results = {'item': 'some_item', '_ansible_no_log': True}
res = cb._get_item_label(results)
self.assertEqual(res, "(censored due to no_log)")
results = {'item': 'some_item', '_ansible_no_log': False}
res = cb._get_item_label(results)
self.assertEqual(res, "some_item")
def test_clean_results_debug_task(self):
cb = CallbackBase()
result = {'item': 'some_item',
'invocation': 'foo --bar whatever [some_json]',
'a': 'a single a in result note letter a is in invocation',
'b': 'a single b in result note letter b is not in invocation',
'changed': True}
cb._clean_results(result, 'debug')
# See https://github.com/ansible/ansible/issues/33723
self.assertTrue('a' in result)
self.assertTrue('b' in result)
self.assertFalse('invocation' in result)
self.assertFalse('changed' in result)
def test_clean_results_debug_task_no_invocation(self):
cb = CallbackBase()
result = {'item': 'some_item',
'a': 'a single a in result note letter a is in invocation',
'b': 'a single b in result note letter b is not in invocation',
'changed': True}
cb._clean_results(result, 'debug')
self.assertTrue('a' in result)
self.assertTrue('b' in result)
self.assertFalse('changed' in result)
self.assertFalse('invocation' in result)
def test_clean_results_debug_task_empty_results(self):
cb = CallbackBase()
result = {}
cb._clean_results(result, 'debug')
self.assertFalse('invocation' in result)
self.assertEqual(len(result), 0)
def test_clean_results(self):
cb = CallbackBase()
result = {'item': 'some_item',
'invocation': 'foo --bar whatever [some_json]',
'a': 'a single a in result note letter a is in invocation',
'b': 'a single b in result note letter b is not in invocation',
'changed': True}
expected_result = result.copy()
cb._clean_results(result, 'ebug')
self.assertEqual(result, expected_result)
class TestCallbackDumpResults(object):
def test_internal_keys(self):
cb = CallbackBase()
result = {'item': 'some_item',
'_ansible_some_var': 'SENTINEL',
'testing_ansible_out': 'should_be_left_in LEFTIN',
'invocation': 'foo --bar whatever [some_json]',
'some_dict_key': {'a_sub_dict_for_key': 'baz'},
'bad_dict_key': {'_ansible_internal_blah': 'SENTINEL'},
'changed': True}
json_out = cb._dump_results(result)
assert '"_ansible_' not in json_out
assert 'SENTINEL' not in json_out
assert 'LEFTIN' in json_out
def test_exception(self):
cb = CallbackBase()
result = {'item': 'some_item LEFTIN',
'exception': ['frame1', 'SENTINEL']}
json_out = cb._dump_results(result)
assert 'SENTINEL' not in json_out
assert 'exception' not in json_out
assert 'LEFTIN' in json_out
def test_verbose(self):
cb = CallbackBase()
result = {'item': 'some_item LEFTIN',
'_ansible_verbose_always': 'chicane'}
json_out = cb._dump_results(result)
assert 'SENTINEL' not in json_out
assert 'LEFTIN' in json_out
def test_diff(self):
cb = CallbackBase()
result = {'item': 'some_item LEFTIN',
'diff': ['remove stuff', 'added LEFTIN'],
'_ansible_verbose_always': 'chicane'}
json_out = cb._dump_results(result)
assert 'SENTINEL' not in json_out
assert 'LEFTIN' in json_out
def test_mixed_keys(self):
cb = CallbackBase()
result = {3: 'pi',
'tau': 6}
json_out = cb._dump_results(result)
round_trip_result = json.loads(json_out)
assert len(round_trip_result) == 2
assert '3' in round_trip_result
assert 'tau' in round_trip_result
assert round_trip_result['3'] == 'pi'
assert round_trip_result['tau'] == 6
class TestCallbackDiff(unittest.TestCase):
def setUp(self):
self.cb = CallbackBase()
def _strip_color(self, s):
return re.sub('\033\\[[^m]*m', '', s)
def test_difflist(self):
# TODO: split into smaller tests?
difflist = [{'before': u'preface\nThe Before String\npostscript',
'after': u'preface\nThe After String\npostscript',
'before_header': u'just before',
'after_header': u'just after'
},
{'before': u'preface\nThe Before String\npostscript',
'after': u'preface\nThe After String\npostscript',
},
{'src_binary': 'chicane'},
{'dst_binary': 'chicanery'},
{'dst_larger': 1},
{'src_larger': 2},
{'prepared': u'what does prepared do?'},
{'before_header': u'just before'},
{'after_header': u'just after'}]
res = self.cb._get_diff(difflist)
self.assertIn(u'Before String', res)
self.assertIn(u'After String', res)
self.assertIn(u'just before', res)
self.assertIn(u'just after', res)
def test_simple_diff(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree\n',
'after': 'one\nthree\nfour\n',
})),
textwrap.dedent("""\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -1,3 +1,3 @@
one
-two
three
+four
"""))
def test_new_file(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': '',
'after': 'one\ntwo\nthree\n',
})),
textwrap.dedent("""\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -0,0 +1,3 @@
+one
+two
+three
"""))
def test_clear_file(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree\n',
'after': '',
})),
textwrap.dedent("""\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -1,3 +0,0 @@
-one
-two
-three
"""))
def test_no_trailing_newline_before(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree',
'after': 'one\ntwo\nthree\n',
})),
textwrap.dedent("""\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -1,3 +1,3 @@
one
two
-three
\\ No newline at end of file
+three
"""))
def test_no_trailing_newline_after(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree\n',
'after': 'one\ntwo\nthree',
})),
textwrap.dedent("""\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -1,3 +1,3 @@
one
two
-three
+three
\\ No newline at end of file
"""))
def test_no_trailing_newline_both(self):
self.assertMultiLineEqual(
self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree',
'after': 'one\ntwo\nthree',
}),
'')
def test_no_trailing_newline_both_with_some_changes(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree',
'after': 'one\nfive\nthree',
})),
textwrap.dedent("""\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -1,3 +1,3 @@
one
-two
+five
three
\\ No newline at end of file
"""))
def test_diff_dicts(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before': dict(one=1, two=2, three=3),
'after': dict(one=1, three=3, four=4),
})),
textwrap.dedent("""\
--- before
+++ after
@@ -1,5 +1,5 @@
{
+ "four": 4,
"one": 1,
- "three": 3,
- "two": 2
+ "three": 3
}
"""))
def test_diff_before_none(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before': None,
'after': 'one line\n',
})),
textwrap.dedent("""\
--- before
+++ after
@@ -0,0 +1 @@
+one line
"""))
def test_diff_after_none(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before': 'one line\n',
'after': None,
})),
textwrap.dedent("""\
--- before
+++ after
@@ -1 +0,0 @@
-one line
"""))
class TestCallbackOnMethods(unittest.TestCase):
def _find_on_methods(self, callback):
cb_dir = dir(callback)
method_names = [x for x in cb_dir if '_on_' in x]
methods = [getattr(callback, mn) for mn in method_names]
return methods
def test_are_methods(self):
cb = CallbackBase()
for method in self._find_on_methods(cb):
self.assertIsInstance(method, types.MethodType)
def test_on_any(self):
cb = CallbackBase()
cb.v2_on_any('whatever', some_keyword='blippy')
cb.on_any('whatever', some_keyword='blippy')
| 14,157 | Python | .py | 351 | 28.472934 | 81 | 0.533144 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,009 | test_linear.py | ansible_ansible/test/units/plugins/strategy/test_linear.py |
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import unittest
from unittest.mock import patch, MagicMock
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.play_context import PlayContext
from ansible.plugins.strategy.linear import StrategyModule
from ansible.executor.task_queue_manager import TaskQueueManager
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestStrategyLinear(unittest.TestCase):
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_noop(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: no
tasks:
- block:
- block:
- name: task1
debug: msg='task1'
failed_when: inventory_hostname == 'host01'
- name: task2
debug: msg='task2'
rescue:
- name: rescue1
debug: msg='rescue1'
- name: rescue2
debug: msg='rescue2'
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
inventory = MagicMock()
inventory.hosts = {}
hosts = []
for i in range(0, 2):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory.hosts[host.name] = host
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
mock_var_manager._fact_cache['host00'] = dict()
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
passwords=None,
forks=5,
)
tqm._initialize_processes(3)
strategy = StrategyModule(tqm)
strategy._hosts_cache = [h.name for h in hosts]
strategy._hosts_cache_all = [h.name for h in hosts]
# debug: task1, debug: task1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, 'task1')
self.assertEqual(host2_task.name, 'task1')
# mark the second host failed
itr.mark_host_failed(hosts[1])
# debug: task2, noop
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
self.assertEqual(len(hosts_tasks), 1)
host, task = hosts_tasks[0]
self.assertEqual(host.name, 'host00')
self.assertEqual(task.action, 'debug')
self.assertEqual(task.name, 'task2')
# noop, debug: rescue1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
self.assertEqual(len(hosts_tasks), 1)
host, task = hosts_tasks[0]
self.assertEqual(host.name, 'host01')
self.assertEqual(task.action, 'debug')
self.assertEqual(task.name, 'rescue1')
# noop, debug: rescue2
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
self.assertEqual(len(hosts_tasks), 1)
host, task = hosts_tasks[0]
self.assertEqual(host.name, 'host01')
self.assertEqual(task.action, 'debug')
self.assertEqual(task.name, 'rescue2')
# end of iteration
assert not strategy._get_next_task_lockstep(strategy.get_hosts_left(itr), itr)
def test_noop_64999(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: no
tasks:
- name: block1
block:
- name: block2
block:
- name: block3
block:
- name: task1
debug:
failed_when: inventory_hostname == 'host01'
rescue:
- name: rescue1
debug:
msg: "rescue"
- name: after_rescue1
debug:
msg: "after_rescue1"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
inventory = MagicMock()
inventory.hosts = {}
hosts = []
for i in range(0, 2):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory.hosts[host.name] = host
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
mock_var_manager._fact_cache['host00'] = dict()
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
passwords=None,
forks=5,
)
tqm._initialize_processes(3)
strategy = StrategyModule(tqm)
strategy._hosts_cache = [h.name for h in hosts]
strategy._hosts_cache_all = [h.name for h in hosts]
# debug: task1, debug: task1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, 'task1')
self.assertEqual(host2_task.name, 'task1')
# mark the second host failed
itr.mark_host_failed(hosts[1])
# noop, debug: rescue1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
self.assertEqual(len(hosts_tasks), 1)
host, task = hosts_tasks[0]
self.assertEqual(host.name, 'host01')
self.assertEqual(task.action, 'debug')
self.assertEqual(task.name, 'rescue1')
# debug: after_rescue1, debug: after_rescue1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, 'after_rescue1')
self.assertEqual(host2_task.name, 'after_rescue1')
# end of iteration
assert not strategy._get_next_task_lockstep(strategy.get_hosts_left(itr), itr)
| 8,457 | Python | .py | 199 | 30.874372 | 97 | 0.582533 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,010 | test_core.py | ansible_ansible/test/units/plugins/filter/test_core.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import pytest
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.filter.core import to_uuid
from ansible.errors import AnsibleFilterError
UUID_DEFAULT_NAMESPACE_TEST_CASES = (
('example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
('test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
('café.example', '8a99d6b1-fb8f-5f78-af86-879768589f56'),
)
UUID_TEST_CASES = (
('361E6D51-FAEC-444A-9079-341386DA8E2E', 'example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
('361E6D51-FAEC-444A-9079-341386DA8E2E', 'test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
('11111111-2222-3333-4444-555555555555', 'example.com', 'e776faa5-5299-55dc-9057-7a00e6be2364'),
)
@pytest.mark.parametrize('value, expected', UUID_DEFAULT_NAMESPACE_TEST_CASES)
def test_to_uuid_default_namespace(value, expected):
assert expected == to_uuid(value)
@pytest.mark.parametrize('namespace, value, expected', UUID_TEST_CASES)
def test_to_uuid(namespace, value, expected):
assert expected == to_uuid(value, namespace=namespace)
def test_to_uuid_invalid_namespace():
with pytest.raises(AnsibleFilterError) as e:
to_uuid('example.com', namespace='11111111-2222-3333-4444-555555555')
assert 'Invalid value' in to_native(e.value)
| 1,488 | Python | .py | 28 | 50 | 101 | 0.754144 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,011 | test_mathstuff.py | ansible_ansible/test/units/plugins/filter/test_mathstuff.py |
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import pytest
from jinja2 import Environment
import ansible.plugins.filter.mathstuff as ms
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
UNIQUE_DATA: list[tuple[list, list]] = [
([], []),
([1, 3, 4, 2], [1, 3, 4, 2]),
([1, 3, 2, 4, 2, 3], [1, 3, 2, 4]),
([1, 2, 3, 4], [1, 2, 3, 4]),
([1, 1, 4, 2, 1, 4, 3, 2], [1, 4, 2, 3]),
]
TWO_SETS_DATA: list[tuple[list, list, tuple]] = [
([], [], ([], [], [])),
([1, 2], [1, 2], ([1, 2], [], [])),
([1, 2], [3, 4], ([], [1, 2], [1, 2, 3, 4])),
([1, 2, 3], [5, 3, 4], ([3], [1, 2], [1, 2, 5, 4])),
([1, 2, 3], [4, 3, 5], ([3], [1, 2], [1, 2, 4, 5])),
]
def dict_values(values: list[int]) -> list[dict[str, int]]:
"""Return a list of non-hashable values derived from the given list."""
return [dict(x=value) for value in values]
for _data, _expected in list(UNIQUE_DATA):
UNIQUE_DATA.append((dict_values(_data), dict_values(_expected)))
for _dataset1, _dataset2, _expected in list(TWO_SETS_DATA):
TWO_SETS_DATA.append((dict_values(_dataset1), dict_values(_dataset2), tuple(dict_values(answer) for answer in _expected)))
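# The dict-wrapped variants appended above exercise each filter with
# non-hashable elements as well, covering the code paths that cannot rely on set().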
env = Environment()
def assert_lists_contain_same_elements(a, b) -> None:
"""Assert that the two values given are lists that contain the same elements, even when the elements cannot be sorted or hashed."""
assert isinstance(a, list)
assert isinstance(b, list)
missing_from_a = [item for item in b if item not in a]
missing_from_b = [item for item in a if item not in b]
assert not missing_from_a, f'elements from `b` {missing_from_a} missing from `a` {a}'
assert not missing_from_b, f'elements from `a` {missing_from_b} missing from `b` {b}'
@pytest.mark.parametrize('data, expected', UNIQUE_DATA, ids=str)
def test_unique(data, expected):
assert_lists_contain_same_elements(ms.unique(env, data), expected)
@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA, ids=str)
def test_intersect(dataset1, dataset2, expected):
assert_lists_contain_same_elements(ms.intersect(env, dataset1, dataset2), expected[0])
@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA, ids=str)
def test_difference(dataset1, dataset2, expected):
assert_lists_contain_same_elements(ms.difference(env, dataset1, dataset2), expected[1])
@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA, ids=str)
def test_symmetric_difference(dataset1, dataset2, expected):
assert_lists_contain_same_elements(ms.symmetric_difference(env, dataset1, dataset2), expected[2])
class TestLogarithm:
def test_log_non_number(self):
# Message changed in python3.6
with pytest.raises(AnsibleFilterTypeError, match='log\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
ms.logarithm('a')
with pytest.raises(AnsibleFilterTypeError, match='log\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
ms.logarithm(10, base='a')
def test_log_ten(self):
assert ms.logarithm(10, 10) == 1.0
assert ms.logarithm(69, 10) * 1000 // 1 == 1838
def test_log_natural(self):
assert ms.logarithm(69) * 1000 // 1 == 4234
def test_log_two(self):
assert ms.logarithm(69, 2) * 1000 // 1 == 6108
class TestPower:
def test_power_non_number(self):
# Message changed in python3.6
with pytest.raises(AnsibleFilterTypeError, match='pow\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
ms.power('a', 10)
with pytest.raises(AnsibleFilterTypeError, match='pow\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
ms.power(10, 'a')
def test_power_squared(self):
assert ms.power(10, 2) == 100
def test_power_cubed(self):
assert ms.power(10, 3) == 1000
class TestInversePower:
def test_root_non_number(self):
# Messages differed in python-2.6, python-2.7-3.5, and python-3.6+
with pytest.raises(AnsibleFilterTypeError, match="root\\(\\) can only be used on numbers:"
" (invalid literal for float\\(\\): a"
"|could not convert string to float: a"
"|could not convert string to float: 'a')"):
ms.inversepower(10, 'a')
with pytest.raises(AnsibleFilterTypeError, match="root\\(\\) can only be used on numbers: (a float is required|must be real number, not str)"):
ms.inversepower('a', 10)
def test_square_root(self):
assert ms.inversepower(100) == 10
assert ms.inversepower(100, 2) == 10
def test_cube_root(self):
assert ms.inversepower(27, 3) == 3
class TestRekeyOnMember():
# (Input data structure, member to rekey on, expected return)
VALID_ENTRIES = (
([{"proto": "eigrp", "state": "enabled"}, {"proto": "ospf", "state": "enabled"}],
'proto',
{'eigrp': {'state': 'enabled', 'proto': 'eigrp'}, 'ospf': {'state': 'enabled', 'proto': 'ospf'}}),
({'eigrp': {"proto": "eigrp", "state": "enabled"}, 'ospf': {"proto": "ospf", "state": "enabled"}},
'proto',
{'eigrp': {'state': 'enabled', 'proto': 'eigrp'}, 'ospf': {'state': 'enabled', 'proto': 'ospf'}}),
)
# (Input data structure, member to rekey on, expected error message)
INVALID_ENTRIES = (
# Fail when key is not found
(AnsibleFilterError, [{"proto": "eigrp", "state": "enabled"}], 'invalid_key', "Key invalid_key was not found"),
(AnsibleFilterError, {"eigrp": {"proto": "eigrp", "state": "enabled"}}, 'invalid_key', "Key invalid_key was not found"),
# Fail when key is duplicated
(AnsibleFilterError, [{"proto": "eigrp"}, {"proto": "ospf"}, {"proto": "ospf"}],
'proto', 'Key ospf is not unique, cannot correctly turn into dict'),
# Fail when value is not a dict
(AnsibleFilterTypeError, ["string"], 'proto', "List item is not a valid dict"),
(AnsibleFilterTypeError, [123], 'proto', "List item is not a valid dict"),
(AnsibleFilterTypeError, [[{'proto': 1}]], 'proto', "List item is not a valid dict"),
# Fail when we do not send a dict or list
(AnsibleFilterTypeError, "string", 'proto', "Type is not a valid list, set, or dict"),
(AnsibleFilterTypeError, 123, 'proto', "Type is not a valid list, set, or dict"),
)
@pytest.mark.parametrize("list_original, key, expected", VALID_ENTRIES)
def test_rekey_on_member_success(self, list_original, key, expected):
assert ms.rekey_on_member(list_original, key) == expected
@pytest.mark.parametrize("expected_exception_type, list_original, key, expected", INVALID_ENTRIES)
def test_fail_rekey_on_member(self, expected_exception_type, list_original, key, expected):
with pytest.raises(expected_exception_type) as err:
ms.rekey_on_member(list_original, key)
assert err.value.message == expected
def test_duplicate_strategy_overwrite(self):
list_original = ({'proto': 'eigrp', 'id': 1}, {'proto': 'ospf', 'id': 2}, {'proto': 'eigrp', 'id': 3})
expected = {'eigrp': {'proto': 'eigrp', 'id': 3}, 'ospf': {'proto': 'ospf', 'id': 2}}
assert ms.rekey_on_member(list_original, 'proto', duplicates='overwrite') == expected
| 7,620 | Python | .py | 127 | 53.125984 | 151 | 0.635997 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,012 | import_fixture.py | ansible_ansible/test/units/plugins/loader_fixtures/import_fixture.py |
# Nothing to see here; this file is intentionally (nearly) empty to support an
# imp.load_source without doing anything
from __future__ import annotations
class test:
def __init__(self, *args, **kwargs):
pass
| 204 | Python | .py | 6 | 30.666667 | 75 | 0.709184 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,013 | test_cache.py | ansible_ansible/test/units/plugins/cache/test_cache.py |
# (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import shutil
import tempfile
from unittest import mock
import unittest
from ansible.errors import AnsibleError
from ansible.plugins.cache import CachePluginAdjudicator
from ansible.plugins.cache.memory import CacheModule as MemoryCache
from ansible.plugins.loader import cache_loader, init_plugin_loader
from ansible.vars.fact_cache import FactCache
import pytest
class TestCachePluginAdjudicator(unittest.TestCase):
def setUp(self):
# memory plugin cache
self.cache = CachePluginAdjudicator()
self.cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'}
self.cache['cache_key_2'] = {'key': 'value'}
def test___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
assert self.cache['new_cache_key'] == {'new_key1': ['new_value1', 'new_value2']}
def test_inner___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
self.cache['new_cache_key']['new_key1'][0] = 'updated_value1'
assert self.cache['new_cache_key'] == {'new_key1': ['updated_value1', 'new_value2']}
def test___contains__(self):
assert 'cache_key' in self.cache
assert 'not_cache_key' not in self.cache
def test_get(self):
assert self.cache.get('cache_key') == {'key1': 'value1', 'key2': 'value2'}
def test_get_with_default(self):
assert self.cache.get('foo', 'bar') == 'bar'
def test_get_without_default(self):
assert self.cache.get('foo') is None
def test___getitem__(self):
with pytest.raises(KeyError):
self.cache['foo'] # pylint: disable=pointless-statement
def test_pop_with_default(self):
assert self.cache.pop('foo', 'bar') == 'bar'
def test_pop_without_default(self):
with pytest.raises(KeyError):
self.cache.pop('foo')
def test_pop(self):
v = self.cache.pop('cache_key_2')
assert v == {'key': 'value'}
assert 'cache_key_2' not in self.cache
def test_update(self):
self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
def test_update_cache_if_changed(self):
# Changes are stored in the CachePluginAdjudicator and will be
        # persisted to the plugin when calling update_cache_if_changed().
        # The exception is flush, which flushes the plugin immediately.
assert len(self.cache.keys()) == 2
assert len(self.cache._plugin.keys()) == 0
self.cache.update_cache_if_changed()
assert len(self.cache._plugin.keys()) == 2
def test_flush(self):
# Fake that the cache already has some data in it but the adjudicator
# hasn't loaded it in.
self.cache._plugin.set('monkey', 'animal')
self.cache._plugin.set('wolf', 'animal')
self.cache._plugin.set('another wolf', 'another animal')
        # The adjudicator doesn't know about the new entries
assert len(self.cache.keys()) == 2
# But the cache itself does
assert len(self.cache._plugin.keys()) == 3
# If we call flush, both the adjudicator and the cache should flush
self.cache.flush()
assert len(self.cache.keys()) == 0
assert len(self.cache._plugin.keys()) == 0
class TestJsonFileCache(TestCachePluginAdjudicator):
cache_prefix = ''
def setUp(self):
self.cache_dir = tempfile.mkdtemp(prefix='ansible-plugins-cache-')
self.cache = CachePluginAdjudicator(
plugin_name='jsonfile', _uri=self.cache_dir,
_prefix=self.cache_prefix)
self.cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'}
self.cache['cache_key_2'] = {'key': 'value'}
def test_keys(self):
# A cache without a prefix will consider all files in the cache
# directory as valid cache entries.
self.cache._plugin._dump(
'no prefix', os.path.join(self.cache_dir, 'no_prefix'))
self.cache._plugin._dump(
'special cache', os.path.join(self.cache_dir, 'special_test'))
# The plugin does not know the CachePluginAdjudicator entries.
assert sorted(self.cache._plugin.keys()) == [
'no_prefix', 'special_test']
assert 'no_prefix' in self.cache
assert 'special_test' in self.cache
assert 'test' not in self.cache
assert self.cache['no_prefix'] == 'no prefix'
assert self.cache['special_test'] == 'special cache'
def tearDown(self):
shutil.rmtree(self.cache_dir)
class TestJsonFileCachePrefix(TestJsonFileCache):
cache_prefix = 'special_'
def test_keys(self):
# For caches with a prefix only files that match the prefix are
# considered. The prefix is removed from the key name.
self.cache._plugin._dump(
'no prefix', os.path.join(self.cache_dir, 'no_prefix'))
self.cache._plugin._dump(
'special cache', os.path.join(self.cache_dir, 'special_test'))
# The plugin does not know the CachePluginAdjudicator entries.
assert sorted(self.cache._plugin.keys()) == ['test']
assert 'no_prefix' not in self.cache
assert 'special_test' not in self.cache
assert 'test' in self.cache
assert self.cache['test'] == 'special cache'
class TestFactCache(unittest.TestCase):
def setUp(self):
with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
self.cache = FactCache()
def test_copy(self):
self.cache['avocado'] = 'fruit'
self.cache['daisy'] = 'flower'
a_copy = self.cache.copy()
self.assertEqual(type(a_copy), dict)
self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
def test_flush(self):
self.cache['motorcycle'] = 'vehicle'
self.cache['sock'] = 'clothing'
self.cache.flush()
assert len(self.cache.keys()) == 0
def test_plugin_load_failure(self):
init_plugin_loader()
# See https://github.com/ansible/ansible/issues/18751
# Note no fact_connection config set, so this will fail
with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'):
self.assertRaisesRegex(AnsibleError,
"Unable to load the facts cache plugin.*json.*",
FactCache)
def test_update(self):
self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
def test_memory_cachemodule_with_loader():
assert isinstance(cache_loader.get('memory'), MemoryCache)
| 7,445 | Python | .py | 156 | 40.185897 | 92 | 0.651166 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,014 | test_vendor.py | ansible_ansible/test/units/_vendor/test_vendor.py |
# (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import pkgutil
import pytest
import sys
from unittest.mock import patch
def reset_internal_vendor_package():
import ansible
ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor')
list(map(sys.path.remove, [path for path in sys.path if path == ansible_vendor_path]))
for pkg in ['ansible._vendor', 'ansible']:
sys.modules.pop(pkg, None)
def test_package_path_masking():
from ansible import _vendor
assert hasattr(_vendor, '__path__') and _vendor.__path__ == []
def test_no_vendored():
reset_internal_vendor_package()
with patch.object(pkgutil, 'iter_modules', return_value=[]):
previous_path = list(sys.path)
import ansible
ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor')
assert ansible_vendor_path not in sys.path
assert sys.path == previous_path
def test_vendored(vendored_pkg_names=None):
if not vendored_pkg_names:
vendored_pkg_names = ['boguspkg']
reset_internal_vendor_package()
with patch.object(pkgutil, 'iter_modules', return_value=list((None, p, None) for p in vendored_pkg_names)):
previous_path = list(sys.path)
import ansible
ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor')
assert sys.path[0] == ansible_vendor_path
assert sys.path[1:] == previous_path
def test_vendored_conflict():
with pytest.warns(UserWarning) as w:
test_vendored(vendored_pkg_names=['sys', 'pkgutil']) # pass a real package we know is already loaded
assert any(list('pkgutil, sys' in str(msg.message) for msg in w)) # ensure both conflicting modules are listed and sorted
| 1,890 | Python | .py | 39 | 42.948718 | 130 | 0.693188 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,015 | time-command.py | ansible_ansible/.azure-pipelines/scripts/time-command.py |
#!/usr/bin/env python
"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""
from __future__ import annotations
import sys
import time
def main():
"""Main program entry point."""
start = time.time()
sys.stdin.reconfigure(errors='surrogateescape')
sys.stdout.reconfigure(errors='surrogateescape')
for line in sys.stdin:
seconds = time.time() - start
sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
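        # e.g. a line arriving 65 seconds after start is written as '01:05 <line>'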
sys.stdout.flush()
if __name__ == '__main__':
main()
| 565 | Python | .py | 16 | 30.8125 | 90 | 0.665434 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,016 | publish-codecov.py | ansible_ansible/.azure-pipelines/scripts/publish-codecov.py |
#!/usr/bin/env python
"""
Upload code coverage reports to codecov.io.
Multiple coverage files from multiple languages are accepted and aggregated after upload.
Python coverage, as well as PowerShell and Python stubs can all be uploaded.
"""
from __future__ import annotations
import argparse
import dataclasses
import pathlib
import shutil
import subprocess
import tempfile
import typing as t
import urllib.request
@dataclasses.dataclass(frozen=True)
class CoverageFile:
name: str
path: pathlib.Path
flags: t.List[str]
@dataclasses.dataclass(frozen=True)
class Args:
dry_run: bool
path: pathlib.Path
def parse_args() -> Args:
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--dry-run', action='store_true')
parser.add_argument('path', type=pathlib.Path)
args = parser.parse_args()
# Store arguments in a typed dataclass
fields = dataclasses.fields(Args)
kwargs = {field.name: getattr(args, field.name) for field in fields}
return Args(**kwargs)
def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
processed = []
for file in directory.joinpath('reports').glob('coverage*.xml'):
name = file.stem.replace('coverage=', '')
# Get flags from name
flags = name.replace('-powershell', '').split('=') # Drop '-powershell' suffix
flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags] # Remove "-01" from stub files
processed.append(CoverageFile(name, file, flags))
return tuple(processed)
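# For example, a (hypothetical) report named 'coverage=python-3.8=stub-01-powershell.xml'
# would yield name 'python-3.8=stub-01-powershell' and flags ['python-3.8', 'stub'].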
def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
for file in files:
cmd = [
str(codecov_bin),
'--name', file.name,
'--file', str(file.path),
]
for flag in file.flags:
cmd.extend(['--flags', flag])
if dry_run:
print(f'DRY-RUN: Would run command: {cmd}')
continue
subprocess.run(cmd, check=True)
def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
if dry_run:
print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
return
with urllib.request.urlopen(url) as resp:
with dest.open('w+b') as f:
# Read data in chunks rather than all at once
shutil.copyfileobj(resp, f, 64 * 1024)
dest.chmod(flags)
def main():
args = parse_args()
url = 'https://ci-files.testing.ansible.com/codecov/linux/codecov'
with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
codecov_bin = pathlib.Path(tmpdir) / 'codecov'
download_file(url, codecov_bin, 0o755, args.dry_run)
files = process_files(args.path)
upload_files(codecov_bin, files, args.dry_run)
if __name__ == '__main__':
main()
| 2,899 | Python | .py | 74 | 33.364865 | 127 | 0.664641 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,017 | combine-coverage.py | ansible_ansible/.azure-pipelines/scripts/combine-coverage.py |
#!/usr/bin/env python
"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""
from __future__ import annotations
import os
import re
import shutil
import sys
def main():
"""Main program entry point."""
source_directory = sys.argv[1]
if '/ansible_collections/' in os.getcwd():
output_path = "tests/output"
else:
output_path = "test/results"
destination_directory = os.path.join(output_path, 'coverage')
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
jobs = {}
count = 0
for name in os.listdir(source_directory):
match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
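        # artifact names are expected to match the pattern above; a stray entry
        # would make `match` None and raise an AttributeError below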
label = match.group('label')
attempt = int(match.group('attempt'))
jobs[label] = max(attempt, jobs.get(label, 0))
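        # e.g. 'Coverage 1 Linux' and 'Coverage 2 Linux' leave attempt 2 for label 'Linux'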
for label, attempt in jobs.items():
name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
source = os.path.join(source_directory, name)
source_files = os.listdir(source)
for source_file in source_files:
source_path = os.path.join(source, source_file)
destination_path = os.path.join(destination_directory, source_file + '.' + label)
print('"%s" -> "%s"' % (source_path, destination_path))
shutil.copyfile(source_path, destination_path)
count += 1
print('Coverage file count: %d' % count)
print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
print('##vso[task.setVariable variable=outputPath]%s' % output_path)
if __name__ == '__main__':
main()
| 2,121 | Python | .py | 45 | 41.288889 | 131 | 0.685257 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,018 | context.py | ansible_ansible/lib/ansible/context.py |
# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Context of the running Ansible.
In the future we *may* create Context objects to allow running multiple Ansible plays in parallel
with different contexts but that is currently out of scope as the Ansible library is just for
running the ansible command line tools.
These APIs are still in flux so do not use them unless you are willing to update them with every Ansible release
"""
from __future__ import annotations
from collections.abc import Mapping, Set
from ansible.module_utils.common.collections import is_sequence
from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs
__all__ = ('CLIARGS',)
# Note: this is not the singleton version. The Singleton is only created once the program has
# actually parsed the args
CLIARGS = CLIArgs({})
# This should be called immediately after cli_args are processed (parsed, validated, and any
# normalization performed on them). No other code should call it
def _init_global_context(cli_args):
"""Initialize the global context objects"""
global CLIARGS
CLIARGS = GlobalCLIArgs.from_options(cli_args)
def cliargs_deferred_get(key, default=None, shallowcopy=False):
"""Closure over getting a key from CLIARGS with shallow copy functionality
Primarily used in ``FieldAttribute`` where we need to defer setting the default
until after the CLI arguments have been parsed
This function is not directly bound to ``CliArgs`` so that it works with
``CLIARGS`` being replaced
"""
def inner():
value = CLIARGS.get(key, default=default)
if not shallowcopy:
return value
elif is_sequence(value):
return value[:]
elif isinstance(value, (Mapping, Set)):
return value.copy()
return value
return inner
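# Illustrative sketch (not part of the upstream file): a deferred getter built
# with cliargs_deferred_get resolves against whatever CLIARGS holds at call time:
#
#   get_tags = cliargs_deferred_get('tags', default=[], shallowcopy=True)
#   # ... after _init_global_context() has processed the parsed CLI options ...
#   tags = get_tags()  # a shallow copy, so mutating it leaves CLIARGS intact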
| 1,933 | Python | .py | 40 | 43.975 | 112 | 0.746674 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,019 | __main__.py | ansible_ansible/lib/ansible/__main__.py |
# Copyright: (c) 2021, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import argparse
from importlib.metadata import distribution
def _short_name(name):
return name.removeprefix('ansible-').replace('ansible', 'adhoc')
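# e.g. _short_name('ansible-playbook') -> 'playbook', while the bare
# _short_name('ansible') -> 'adhoc'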
def main():
dist = distribution('ansible-core')
ep_map = {_short_name(ep.name): ep for ep in dist.entry_points if ep.group == 'console_scripts'}
parser = argparse.ArgumentParser(prog='python -m ansible', add_help=False)
parser.add_argument('entry_point', choices=list(ep_map))
args, extra = parser.parse_known_args()
main = ep_map[args.entry_point].load()
main([args.entry_point] + extra)
if __name__ == '__main__':
main()
| 796 | Python | .py | 17 | 43.117647 | 100 | 0.704811 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,020 | constants.py | ansible_ansible/lib/ansible/constants.py |
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import re
from string import ascii_letters, digits
from ansible.config.manager import ConfigManager
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.collections import Sequence
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.release import __version__
from ansible.utils.fqcn import add_internal_fqcns
# initialize config manager/config data to read/store global settings
# and generate 'pseudo constants' for app consumption.
config = ConfigManager()
def _warning(msg):
""" display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write """
try:
from ansible.utils.display import Display
Display().warning(msg)
except Exception:
import sys
sys.stderr.write(' [WARNING] %s\n' % (msg))
def _deprecated(msg, version):
""" display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write """
try:
from ansible.utils.display import Display
Display().deprecated(msg, version=version)
except Exception:
import sys
sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
def handle_config_noise(display=None):
if display is not None:
w = display.warning
d = display.deprecated
else:
w = _warning
d = _deprecated
while config.WARNINGS:
warn = config.WARNINGS.pop()
w(warn)
while config.DEPRECATED:
# tuple with name and options
dep = config.DEPRECATED.pop(0)
msg = config.get_deprecated_msg_from_config(dep[1])
# use tabs only for ansible-doc?
msg = msg.replace("\t", "")
d(f"{dep[0]} option. {msg}", version=dep[1]['version'])
def set_constant(name, value, export=vars()):
""" sets constants and returns resolved options dict """
export[name] = value
class _DeprecatedSequenceConstant(Sequence):
def __init__(self, value, msg, version):
self._value = value
self._msg = msg
self._version = version
def __len__(self):
_deprecated(self._msg, self._version)
return len(self._value)
def __getitem__(self, y):
_deprecated(self._msg, self._version)
return self._value[y]
# CONSTANTS ### yes, actual ones
# The following are hard-coded action names
_ACTION_DEBUG = add_internal_fqcns(('debug', ))
_ACTION_IMPORT_PLAYBOOK = add_internal_fqcns(('import_playbook', ))
_ACTION_IMPORT_ROLE = add_internal_fqcns(('import_role', ))
_ACTION_IMPORT_TASKS = add_internal_fqcns(('import_tasks', ))
_ACTION_INCLUDE_ROLE = add_internal_fqcns(('include_role', ))
_ACTION_INCLUDE_TASKS = add_internal_fqcns(('include_tasks', ))
_ACTION_INCLUDE_VARS = add_internal_fqcns(('include_vars', ))
_ACTION_INVENTORY_TASKS = add_internal_fqcns(('add_host', 'group_by'))
_ACTION_META = add_internal_fqcns(('meta', ))
_ACTION_SET_FACT = add_internal_fqcns(('set_fact', ))
_ACTION_SETUP = add_internal_fqcns(('setup', ))
_ACTION_HAS_CMD = add_internal_fqcns(('command', 'shell', 'script'))
_ACTION_ALLOWS_RAW_ARGS = _ACTION_HAS_CMD + add_internal_fqcns(('raw', ))
_ACTION_ALL_INCLUDES = _ACTION_INCLUDE_TASKS + _ACTION_INCLUDE_ROLE
_ACTION_ALL_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
_ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES = _ACTION_INCLUDE_ROLE + _ACTION_IMPORT_ROLE
_ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
_ACTION_ALL_INCLUDE_ROLE_TASKS = _ACTION_INCLUDE_ROLE + _ACTION_INCLUDE_TASKS
_ACTION_FACT_GATHERING = _ACTION_SETUP + add_internal_fqcns(('gather_facts', ))
_ACTION_WITH_CLEAN_FACTS = _ACTION_SET_FACT + _ACTION_INCLUDE_VARS
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
COLOR_CODES = {
'black': u'0;30', 'bright gray': u'0;37',
'blue': u'0;34', 'white': u'1;37',
'green': u'0;32', 'bright blue': u'1;34',
'cyan': u'0;36', 'bright green': u'1;32',
'red': u'0;31', 'bright cyan': u'1;36',
'purple': u'0;35', 'bright red': u'1;31',
'yellow': u'0;33', 'bright purple': u'1;35',
'dark gray': u'1;30', 'bright yellow': u'1;33',
'magenta': u'0;35', 'bright magenta': u'1;35',
'normal': u'0',
}
REJECT_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
BOOL_TRUE = BOOLEANS_TRUE
COLLECTION_PTYPE_COMPAT = {'module': 'modules'}
PYTHON_DOC_EXTENSIONS = ('.py',)
YAML_DOC_EXTENSIONS = ('.yml', '.yaml')
DOC_EXTENSIONS = PYTHON_DOC_EXTENSIONS + YAML_DOC_EXTENSIONS
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
# FIXME: expand to other plugins, but never doc fragments
CONFIGURABLE_PLUGINS = ('become', 'cache', 'callback', 'cliconf', 'connection', 'httpapi', 'inventory', 'lookup', 'netconf', 'shell', 'vars')
# NOTE: always update the docs/docsite/Makefile to match
DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy', 'test', 'filter')
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "MANIFEST", "Makefile") # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
INTERNAL_STATIC_VARS = frozenset(
[
"ansible_async_path",
"ansible_collection_name",
"ansible_config_file",
"ansible_dependent_role_names",
"ansible_diff_mode",
"ansible_config_file",
"ansible_facts",
"ansible_forks",
"ansible_inventory_sources",
"ansible_limit",
"ansible_play_batch",
"ansible_play_hosts",
"ansible_play_hosts_all",
"ansible_play_role_names",
"ansible_playbook_python",
"ansible_role_name",
"ansible_role_names",
"ansible_run_tags",
"ansible_skip_tags",
"ansible_verbosity",
"ansible_version",
"inventory_dir",
"inventory_file",
"inventory_hostname",
"inventory_hostname_short",
"groups",
"group_names",
"omit",
"hostvars",
"playbook_dir",
"play_hosts",
"role_name",
"role_names",
"role_path",
"role_uuid",
"role_names",
]
)
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
WIN_MOVED = ['ansible.windows.win_command', 'ansible.windows.win_shell']
MODULE_REQUIRE_ARGS_SIMPLE = ['command', 'raw', 'script', 'shell', 'win_command', 'win_shell']
MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(MODULE_REQUIRE_ARGS_SIMPLE) + WIN_MOVED)
MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'shell', 'win_shell', 'raw')) + WIN_MOVED)
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python', 'ansible_facts')
SYNTHETIC_COLLECTIONS = ('ansible.builtin', 'ansible.legacy')
TREE_DIR = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
# This matches a string that cannot be used as a valid python variable name, i.e. 'not-valid', 'not!valid@either', '1_nor_This'
INVALID_VARIABLE_NAMES = re.compile(r'^[\d\W]|[^\w]')
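# e.g. it matches '1_nor_This' (leading digit) and 'not-valid' (non-word char),
# but not 'valid_name'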
# FIXME: remove once play_context mangling is removed
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
COMMON_CONNECTION_VARS = frozenset(('ansible_connection', 'ansible_host', 'ansible_user', 'ansible_shell_executable',
'ansible_port', 'ansible_pipelining', 'ansible_password', 'ansible_timeout',
'ansible_shell_type', 'ansible_module_compression', 'ansible_private_key_file'))
MAGIC_VARIABLE_MAPPING = dict(
# base
connection=('ansible_connection', ),
module_compression=('ansible_module_compression', ),
shell=('ansible_shell_type', ),
executable=('ansible_shell_executable', ),
# connection common
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
password=('ansible_ssh_pass', 'ansible_password'),
port=('ansible_ssh_port', 'ansible_port'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
# networking modules
network_os=('ansible_network_os', ),
connection_user=('ansible_connection_user',),
# ssh TODO: remove
ssh_executable=('ansible_ssh_executable', ),
ssh_common_args=('ansible_ssh_common_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
# docker TODO: remove
docker_extra_args=('ansible_docker_extra_args', ),
# become
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
)
# POPULATE SETTINGS FROM CONFIG ###
for setting in config.get_configuration_definitions():
set_constant(setting, config.get_config_value(setting, variables=vars()))
# emit any warnings or deprecations
handle_config_noise()
| 9,757 | Python | .py | 212 | 40.990566 | 143 | 0.675047 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,021 | __init__.py | ansible_ansible/lib/ansible/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
# make vendored top-level modules accessible EARLY
import ansible._vendor
# Note: Do not add any code to this file. The ansible module may be
# a namespace package when using Ansible-2.1+. Anything in this file may not be
# available if one of the other packages in the namespace is loaded first.
#
# This is for backwards compat. Code should be ported to get these from
# ansible.release instead of from here.
from ansible.release import __version__, __author__
| 1,215 | Python | .py | 26 | 45.615385 | 78 | 0.777403 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,022 | release.py | ansible_ansible/lib/ansible/release.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
__version__ = '2.19.0.dev0'
__author__ = 'Ansible, Inc.'
__codename__ = "What Is and What Should Never Be"
| 857 | Python | .py | 20 | 41.75 | 70 | 0.756886 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,023 | ajson.py | ansible_ansible/lib/ansible/parsing/ajson.py |
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import json
# Imported for backwards compat
from ansible.module_utils.common.json import AnsibleJSONEncoder # pylint: disable=unused-import
from ansible.parsing.vault import VaultLib
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import wrap_var
class AnsibleJSONDecoder(json.JSONDecoder):
_vaults = {} # type: dict[str, VaultLib]
def __init__(self, *args, **kwargs):
kwargs['object_hook'] = self.object_hook
super(AnsibleJSONDecoder, self).__init__(*args, **kwargs)
@classmethod
def set_secrets(cls, secrets):
cls._vaults['default'] = VaultLib(secrets=secrets)
def object_hook(self, pairs):
for key in pairs:
value = pairs[key]
if key == '__ansible_vault':
value = AnsibleVaultEncryptedUnicode(value)
if self._vaults:
value.vault = self._vaults['default']
return value
elif key == '__ansible_unsafe':
return wrap_var(value)
return pairs
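# Minimal usage sketch (added for illustration, not part of the upstream file):
#
#   json.loads('{"x": {"__ansible_unsafe": "{{ raw }}"}}', cls=AnsibleJSONDecoder)
#   # -> {'x': '{{ raw }}'} with the value wrapped as an unsafe (never-templated) string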
| 1,254 | Python | .py | 28 | 36.928571 | 96 | 0.666392 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,024 | plugin_docs.py | ansible_ansible/lib/ansible/parsing/plugin_docs.py |
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import ast
import tokenize
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.utils.display import Display
display = Display()
string_to_vars = {
'DOCUMENTATION': 'doc',
'EXAMPLES': 'plainexamples',
'RETURN': 'returndocs',
'ANSIBLE_METADATA': 'metadata', # NOTE: now unused, but kept for backwards compat
}
def _var2string(value):
""" reverse lookup of the dict above """
for k, v in string_to_vars.items():
if v == value:
return k
def _init_doc_dict():
""" initialize a return dict for docs with the expected structure """
return {k: None for k in string_to_vars.values()}
def read_docstring_from_yaml_file(filename, verbose=True, ignore_errors=True):
""" Read docs from 'sidecar' yaml file doc for a plugin """
data = _init_doc_dict()
file_data = {}
try:
with open(filename, 'rb') as yamlfile:
file_data = AnsibleLoader(yamlfile.read(), file_name=filename).get_single_data()
except Exception as e:
msg = "Unable to parse yaml file '%s': %s" % (filename, to_native(e))
if not ignore_errors:
raise AnsibleParserError(msg, orig_exc=e)
elif verbose:
display.error(msg)
if file_data:
for key in string_to_vars:
data[string_to_vars[key]] = file_data.get(key, None)
return data
def read_docstring_from_python_module(filename, verbose=True, ignore_errors=True):
"""
Use tokenization to search for assignment of the documentation variables in the given file.
Parse from YAML and return the resulting python structure or None together with examples as plain text.
"""
seen = set()
data = _init_doc_dict()
next_string = None
with tokenize.open(filename) as f:
tokens = tokenize.generate_tokens(f.readline)
for token in tokens:
            # found a label that looks like a variable
if token.type == tokenize.NAME:
                # label is the expected value, in the correct place, and has not been seen before
if token.start == 1 and token.string in string_to_vars and token.string not in seen:
# next token that is string has the docs
next_string = string_to_vars[token.string]
continue
# previous token indicated this string is a doc string
if next_string is not None and token.type == tokenize.STRING:
# ensure we only process one case of it
seen.add(token.string)
value = token.string
# strip string modifiers/delimiters
if value.startswith(('r', 'b')):
value = value.lstrip('rb')
if value.startswith(("'", '"')):
value = value.strip("'\"")
# actually use the data
if next_string == 'plainexamples':
# keep as string, can be yaml, but we let caller deal with it
data[next_string] = to_text(value)
else:
# yaml load the data
try:
data[next_string] = AnsibleLoader(value, file_name=filename).get_single_data()
except Exception as e:
msg = "Unable to parse docs '%s' in python file '%s': %s" % (_var2string(next_string), filename, to_native(e))
if not ignore_errors:
raise AnsibleParserError(msg, orig_exc=e)
elif verbose:
display.error(msg)
next_string = None
# if nothing else worked, fall back to old method
if not seen:
data = read_docstring_from_python_file(filename, verbose, ignore_errors)
return data
def read_docstring_from_python_file(filename, verbose=True, ignore_errors=True):
"""
Use ast to search for assignment of the DOCUMENTATION and EXAMPLES variables in the given file.
Parse DOCUMENTATION from YAML and return the YAML doc or None together with EXAMPLES, as plain text.
"""
data = _init_doc_dict()
try:
with open(filename, 'rb') as b_module_data:
M = ast.parse(b_module_data.read())
for child in M.body:
if isinstance(child, ast.Assign):
for t in child.targets:
try:
theid = t.id
except AttributeError:
                            # skip; errors can happen when trying to use the normal code
display.warning("Building documentation, failed to assign id for %s on %s, skipping" % (t, filename))
continue
if theid in string_to_vars:
varkey = string_to_vars[theid]
if isinstance(child.value, ast.Dict):
data[varkey] = ast.literal_eval(child.value)
else:
if theid == 'EXAMPLES':
                                    # examples 'can' be yaml, but even if so, we don't want to parse as such here
# as it can create undesired 'objects' that don't display well as docs.
data[varkey] = to_text(child.value.value)
else:
# string should be yaml if already not a dict
data[varkey] = AnsibleLoader(child.value.value, file_name=filename).get_single_data()
display.debug('Documentation assigned: %s' % varkey)
except Exception as e:
msg = "Unable to parse documentation in python file '%s': %s" % (filename, to_native(e))
if not ignore_errors:
raise AnsibleParserError(msg, orig_exc=e)
elif verbose:
display.error(msg)
return data
def read_docstring(filename, verbose=True, ignore_errors=True):
""" returns a documentation dictionary from Ansible plugin docstrings """
# NOTE: adjacency of doc file to code file is responsibility of caller
if filename.endswith(C.YAML_DOC_EXTENSIONS):
docstring = read_docstring_from_yaml_file(filename, verbose=verbose, ignore_errors=ignore_errors)
elif filename.endswith(C.PYTHON_DOC_EXTENSIONS):
docstring = read_docstring_from_python_module(filename, verbose=verbose, ignore_errors=ignore_errors)
elif not ignore_errors:
raise AnsibleError("Unknown documentation format: %s" % to_native(filename))
if not docstring and not ignore_errors:
raise AnsibleError("Unable to parse documentation for: %s" % to_native(filename))
    # because seealso is specially processed from 'doc' later on
# TODO: stop any other 'overloaded' implementation in main doc
docstring['seealso'] = None
return docstring
def read_docstub(filename):
"""
Quickly find short_description using string methods instead of node parsing.
This does not return a full set of documentation strings and is intended for
operations like ansible-doc -l.
"""
in_documentation = False
capturing = False
indent_detection = ''
doc_stub = []
with open(filename, 'r') as t_module_data:
for line in t_module_data:
if in_documentation:
# start capturing the stub until indentation returns
if capturing and line.startswith(indent_detection):
doc_stub.append(line)
elif capturing and not line.startswith(indent_detection):
break
elif line.lstrip().startswith('short_description:'):
capturing = True
# Detect that the short_description continues on the next line if it's indented more
# than short_description itself.
indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1)
doc_stub.append(line)
elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line):
in_documentation = True
short_description = r''.join(doc_stub).strip().rstrip('.')
data = AnsibleLoader(short_description, file_name=filename).get_single_data()
return data
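# Illustrative sketch: for a plugin whose DOCUMENTATION block contains
#   short_description: Ping the remote host.
# read_docstub returns {'short_description': 'Ping the remote host'}; the
# trailing '.' is stripped before the YAML load.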
| 8,661 | Python | .py | 171 | 38.590643 | 134 | 0.605691 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,025 | splitter.py | ansible_ansible/lib/ansible/parsing/splitter.py |
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import codecs
import re
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_text
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r"""
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)""".format(_HEXCHAR * 8, _HEXCHAR * 4, _HEXCHAR * 2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
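# Illustrative behaviour of _decode_escapes — a minimal sketch, not part of
# the original module:
#
#   _decode_escapes('a\\tb')        # -> 'a\tb' (backslash-t becomes a real tab)
#   _decode_escapes('caf\\u00e9')   # -> 'café'
#   _decode_escapes('\\N{COMMA}')   # -> ','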
def parse_kv(args, check_raw=False):
"""
    Convert a string of key/value items to a dict. Free-form params (tokens
    without '=') are collected into a '_raw_params' entry. When check_raw is
    True, k=v tokens whose key is not a recognized shell/command option are
    treated as free-form as well and folded into '_raw_params'.
"""
args = to_text(args, nonstring='passthru')
options = {}
if args is not None:
vargs = split_args(args)
raw_params = []
for orig_x in vargs:
x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# FIXME: make the retrieval of this list of shell/command options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn', 'stdin', 'stdin_add_newline', 'strip_empty_ends'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = join_args(raw_params)
return options
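# Illustrative usage of parse_kv — a minimal sketch, not part of the original
# module:
#
#   parse_kv(u'src=a dest=b')
#   # -> {'src': 'a', 'dest': 'b'}
#   parse_kv(u'echo hi creates=/tmp/x', check_raw=True)
#   # -> {'creates': '/tmp/x', '_raw_params': 'echo hi'}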
def _get_quote_state(token, quote_char):
"""
    determine whether a quoted string is unterminated, in which case the
    caller needs to put the split tokens back together
"""
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx - 1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
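# Illustrative behaviour of _get_quote_state — a minimal sketch, not part of
# the original module:
#
#   _get_quote_state('a "b', None)   # -> '"'  (a quote was opened, not closed)
#   _get_quote_state('c" d', '"')    # -> None (the carried-over quote closed)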
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
"""
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
"""
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
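# Illustrative behaviour of _count_jinja2_blocks — a minimal sketch, not part
# of the original module:
#
#   _count_jinja2_blocks('x={{', 0, '{{', '}}')   # -> 1 (block opened)
#   _count_jinja2_blocks('y }}', 1, '{{', '}}')   # -> 0 (block closed)
#   _count_jinja2_blocks('}}', 0, '{{', '}}')     # -> 0 (depth is clamped at 0)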
def join_args(s):
"""
Join the original cmd based on manipulations by split_args().
This retains the original newlines and whitespaces.
"""
result = ''
for p in s:
if len(result) == 0 or result.endswith('\n'):
result += p
else:
result += ' ' + p
return result
def split_args(args):
"""
    Splits args on whitespace, but intelligently reassembles
    those that may have been split over a jinja2 block or quotes.
    When used in a remote module we never need to be concerned about
    jinja2 blocks; however, this function is also used in core before
    the args are templated.
    example input: a=b c="foo bar"
    example output: ['a=b', 'c="foo bar"']
    Basically this is a variation of shlex with some more intelligence
    for how Ansible needs to use it.
"""
if not args:
return []
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on newlines
items = args.split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for (itemidx, item) in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.split(' ')
line_continuation = False
for (idx, token) in enumerate(tokens):
            # Empty entries mean we had consecutive spaces; hold onto them
            # so we can reconstruct the original spacing later
if len(token) == 0 and idx != 0:
# Make sure there is a params item to store result in.
if not params:
params.append('')
params[-1] += ' '
continue
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes and not (print_depth or block_depth or comment_depth):
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
else:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
        # for every line except the last (we split on newlines above), re-add
        # the newline here to preserve the original structure, unless a line
        # continuation swallowed it
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
# Make sure there is a params item to store result in.
if not params:
params.append('')
params[-1] += '\n'
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise AnsibleParserError(u"failed at splitting arguments, either an unbalanced jinja2 block or quotes: {0}".format(args))
return params
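# Illustrative behaviour of split_args — a minimal sketch, not part of the
# original module:
#
#   split_args('a=b c="foo bar"')    # -> ['a=b', 'c="foo bar"']
#   split_args('key={{ a + b }}')    # -> ['key={{ a + b }}'] (jinja2 reassembled)
#   split_args('key={{ a')           # raises AnsibleParserError (unbalanced)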
| 11,021
|
Python
|
.py
| 231
| 38.090909
| 146
| 0.611975
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,026
|
dataloader.py
|
ansible_ansible/lib/ansible/parsing/dataloader.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import copy
import os
import os.path
import re
import tempfile
import typing as t
from ansible import constants as C
from ansible.errors import AnsibleFileNotFound, AnsibleParserError
from ansible.module_utils.basic import is_executable
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.parsing.quoting import unquote
from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.vault import VaultLib, is_encrypted, is_encrypted_file, parse_vaulttext_envelope, PromptVaultSecret
from ansible.utils.path import unfrackpath
from ansible.utils.display import Display
display = Display()
# Tries to determine if a path is inside a role, last dir must be 'tasks'
# this is not perfect but people should really avoid 'tasks' dirs outside roles when using Ansible.
RE_TASKS = re.compile(u'(?:^|%s)+tasks%s?$' % (os.path.sep, os.path.sep))
class DataLoader:
"""
The DataLoader class is used to load and parse YAML or JSON content,
either from a given file name or from a string that was previously
read in through other means. A Vault password can be specified, and
any vault-encrypted files will be decrypted.
Data read from files will also be cached, so the file will never be
read from disk more than once.
Usage:
dl = DataLoader()
        # optionally: dl.set_vault_secrets([('default', ansible.parsing.vault.PromptVaultSecret(...),)])
ds = dl.load('...')
ds = dl.load_from_file('/path/to/file')
"""
def __init__(self):
self._basedir = '.'
# NOTE: not effective with forks as the main copy does not get updated.
# avoids rereading files
self._FILE_CACHE = dict()
# NOTE: not thread safe, also issues with forks not returning data to main proc
# so they need to be cleaned independently. See WorkerProcess for example.
# used to keep track of temp files for cleaning
self._tempfiles = set()
# initialize the vault stuff with an empty password
# TODO: replace with a ref to something that can get the password
# a creds/auth provider
self._vaults = {}
self._vault = VaultLib()
self.set_vault_secrets(None)
# TODO: since we can query vault_secrets late, we could provide this to DataLoader init
def set_vault_secrets(self, vault_secrets: list[tuple[str, PromptVaultSecret]] | None) -> None:
self._vault.secrets = vault_secrets
def load(self, data: str, file_name: str = '<string>', show_content: bool = True, json_only: bool = False) -> t.Any:
"""Backwards compat for now"""
return from_yaml(data, file_name, show_content, self._vault.secrets, json_only=json_only)
def load_from_file(self, file_name: str, cache: str = 'all', unsafe: bool = False, json_only: bool = False) -> t.Any:
"""
Loads data from a file, which can contain either JSON or YAML.
:param file_name: The name of the file to load data from.
:param cache: Options for caching: none|all|vaulted
:param unsafe: If True, returns the parsed data as-is without deep copying.
:param json_only: If True, only loads JSON data from the file.
:return: The loaded data, optionally deep-copied for safety.
"""
# Resolve the file name
file_name = self.path_dwim(file_name)
# Log the file being loaded
display.debug("Loading data from %s" % file_name)
# Check if the file has been cached and use the cached data if available
if cache != 'none' and file_name in self._FILE_CACHE:
parsed_data = self._FILE_CACHE[file_name]
else:
# Read the file contents and load the data structure from them
(b_file_data, show_content) = self._get_file_contents(file_name)
file_data = to_text(b_file_data, errors='surrogate_or_strict')
parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content, json_only=json_only)
# Cache the file contents for next time based on the cache option
if cache == 'all':
self._FILE_CACHE[file_name] = parsed_data
elif cache == 'vaulted' and not show_content:
self._FILE_CACHE[file_name] = parsed_data
# Return the parsed data, optionally deep-copied for safety
if unsafe:
return parsed_data
else:
return copy.deepcopy(parsed_data)
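    # Illustrative usage of the cache modes — a minimal sketch; the file names
    # are hypothetical:
    #
    #   dl = DataLoader()
    #   dl.load_from_file('group_vars/all.yml')                    # parsed and cached
    #   dl.load_from_file('secrets.yml', cache='vaulted')          # cached only if vault-encrypted
    #   dl.load_from_file('big.json', cache='none', unsafe=True)   # no cache, no deep copy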
def path_exists(self, path: str) -> bool:
path = self.path_dwim(path)
return os.path.exists(to_bytes(path, errors='surrogate_or_strict'))
def is_file(self, path: str) -> bool:
path = self.path_dwim(path)
return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull
def is_directory(self, path: str) -> bool:
path = self.path_dwim(path)
return os.path.isdir(to_bytes(path, errors='surrogate_or_strict'))
def list_directory(self, path: str) -> list[str]:
path = self.path_dwim(path)
return os.listdir(path)
def is_executable(self, path: str) -> bool:
"""is the given path executable?"""
path = self.path_dwim(path)
return is_executable(path)
def _decrypt_if_vault_data(self, b_vault_data: bytes, b_file_name: bytes | None = None) -> tuple[bytes, bool]:
"""Decrypt b_vault_data if encrypted and return b_data and the show_content flag"""
if not is_encrypted(b_vault_data):
show_content = True
return b_vault_data, show_content
b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_vault_data)
b_data = self._vault.decrypt(b_vault_data, filename=b_file_name)
show_content = False
return b_data, show_content
def _get_file_contents(self, file_name: str) -> tuple[bytes, bool]:
"""
Reads the file contents from the given file name
If the contents are vault-encrypted, it will decrypt them and return
the decrypted data
:arg file_name: The name of the file to read. If this is a relative
path, it will be expanded relative to the basedir
:raises AnsibleFileNotFound: if the file_name does not refer to a file
:raises AnsibleParserError: if we were unable to read the file
:return: Returns a byte string of the file contents
"""
if not file_name or not isinstance(file_name, (binary_type, text_type)):
raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
b_file_name = to_bytes(self.path_dwim(file_name))
# This is what we really want but have to fix unittests to make it pass
# if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name):
if not self.path_exists(b_file_name):
raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
try:
with open(b_file_name, 'rb') as f:
data = f.read()
return self._decrypt_if_vault_data(data, b_file_name)
except (IOError, OSError) as e:
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)), orig_exc=e)
def get_basedir(self) -> str:
""" returns the current basedir """
return self._basedir
def set_basedir(self, basedir: str) -> None:
""" sets the base directory, used to find files when a relative path is given """
if basedir is not None:
self._basedir = to_text(basedir)
def path_dwim(self, given: str) -> str:
"""
make relative paths work like folks expect.
"""
given = unquote(given)
given = to_text(given, errors='surrogate_or_strict')
if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'):
path = given
else:
basedir = to_text(self._basedir, errors='surrogate_or_strict')
path = os.path.join(basedir, given)
return unfrackpath(path, follow=False)
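    # Illustrative behaviour of path_dwim — a minimal sketch; the paths are
    # hypothetical:
    #
    #   dl = DataLoader()
    #   dl.set_basedir('/srv/play')
    #   dl.path_dwim('templates/x.j2')   # -> '/srv/play/templates/x.j2'
    #   dl.path_dwim('/abs/x.yml')       # -> '/abs/x.yml' (absolute, kept as-is)
    #   dl.path_dwim('~/x.yml')          # -> expanded under the user's home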
def _is_role(self, path: str) -> bool:
""" imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc """
b_path = to_bytes(path, errors='surrogate_or_strict')
b_path_dirname = os.path.dirname(b_path)
b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict')
untasked_paths = (
os.path.join(b_path, b'main.yml'),
os.path.join(b_path, b'main.yaml'),
os.path.join(b_path, b'main'),
)
tasked_paths = (
os.path.join(b_upath, b'tasks/main.yml'),
os.path.join(b_upath, b'tasks/main.yaml'),
os.path.join(b_upath, b'tasks/main'),
os.path.join(b_upath, b'meta/main.yml'),
os.path.join(b_upath, b'meta/main.yaml'),
os.path.join(b_upath, b'meta/main'),
os.path.join(b_path_dirname, b'tasks/main.yml'),
os.path.join(b_path_dirname, b'tasks/main.yaml'),
os.path.join(b_path_dirname, b'tasks/main'),
os.path.join(b_path_dirname, b'meta/main.yml'),
os.path.join(b_path_dirname, b'meta/main.yaml'),
os.path.join(b_path_dirname, b'meta/main'),
)
exists_untasked = map(os.path.exists, untasked_paths)
exists_tasked = map(os.path.exists, tasked_paths)
if RE_TASKS.search(path) and any(exists_untasked) or any(exists_tasked):
return True
return False
def path_dwim_relative(self, path: str, dirname: str, source: str, is_role: bool = False) -> str:
"""
find one file in either a role or playbook dir with or without
explicitly named dirname subdirs
Used in action plugins and lookups to find supplemental files that
could be in either place.
"""
search = []
source = to_text(source, errors='surrogate_or_strict')
# I have full path, nothing else needs to be looked at
if source.startswith(to_text(os.path.sep)) or source.startswith(u'~'):
search.append(unfrackpath(source, follow=False))
else:
# base role/play path + templates/files/vars + relative filename
search.append(os.path.join(path, dirname, source))
basedir = unfrackpath(path, follow=False)
# not told if role, but detect if it is a role and if so make sure you get correct base path
if not is_role:
is_role = self._is_role(path)
if is_role and RE_TASKS.search(path):
basedir = unfrackpath(os.path.dirname(path), follow=False)
cur_basedir = self._basedir
self.set_basedir(basedir)
# resolved base role/play path + templates/files/vars + relative filename
search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False))
self.set_basedir(cur_basedir)
if is_role and not source.endswith(dirname):
# look in role's tasks dir w/o dirname
search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False))
# try to create absolute path for loader basedir + templates/files/vars + filename
search.append(unfrackpath(os.path.join(dirname, source), follow=False))
# try to create absolute path for loader basedir
search.append(unfrackpath(os.path.join(basedir, source), follow=False))
# try to create absolute path for dirname + filename
search.append(self.path_dwim(os.path.join(dirname, source)))
# try to create absolute path for filename
search.append(self.path_dwim(source))
for candidate in search:
if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')):
break
return candidate
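    # Illustrative search order for path_dwim_relative — a sketch under the
    # assumption that '/srv/roles/r/tasks' is detected as a role tasks dir:
    #
    #   path_dwim_relative('/srv/roles/r/tasks', 'templates', 'x.j2') tries
    #     /srv/roles/r/tasks/templates/x.j2, then /srv/roles/r/templates/x.j2,
    #     then /srv/roles/r/tasks/x.j2, then loader-basedir fallbacks;
    #   the first existing candidate wins, otherwise the last one is returned.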
def path_dwim_relative_stack(self, paths: list[str], dirname: str, source: str, is_role: bool = False) -> str:
"""
        find one file in the first matching path in the stack, taking roles into account and adding the play basedir as a fallback
:arg paths: A list of text strings which are the paths to look for the filename in.
:arg dirname: A text string representing a directory. The directory
is prepended to the source to form the path to search for.
:arg source: A text string which is the filename to search for
:rtype: A text string
:returns: An absolute path to the filename ``source`` if found
        :raises: An AnsibleFileNotFound Exception if the file is not found in any of the search paths
"""
b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
b_source = to_bytes(source, errors='surrogate_or_strict')
result = None
search = []
if source is None:
display.warning('Invalid request to find a file that matches a "null" value')
elif source and (source.startswith('~') or source.startswith(os.path.sep)):
# path is absolute, no relative needed, check existence and return source
test_path = unfrackpath(b_source, follow=False)
if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
result = test_path
else:
display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths))
for path in paths:
upath = unfrackpath(path, follow=False)
b_upath = to_bytes(upath, errors='surrogate_or_strict')
b_pb_base_dir = os.path.dirname(b_upath)
# if path is in role and 'tasks' not there already, add it into the search
if (is_role or self._is_role(path)) and b_pb_base_dir.endswith(b'/tasks'):
search.append(os.path.join(os.path.dirname(b_pb_base_dir), b_dirname, b_source))
search.append(os.path.join(b_pb_base_dir, b_source))
# don't add dirname if user already is using it in source
                if b_source.split(b'/')[0] != b_dirname:
search.append(os.path.join(b_upath, b_dirname, b_source))
search.append(os.path.join(b_upath, b_source))
# always append basedir as last resort
# don't add dirname if user already is using it in source
            if b_source.split(b'/')[0] != b_dirname:
search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_dirname, b_source))
search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_source))
display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
for b_candidate in search:
display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
if os.path.exists(b_candidate):
result = to_text(b_candidate)
break
if result is None:
raise AnsibleFileNotFound(file_name=source, paths=[to_native(p) for p in search])
return result
def _create_content_tempfile(self, content: str | bytes) -> str:
""" Create a tempfile containing defined content """
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def get_real_file(self, file_path: str, decrypt: bool = True) -> str:
"""
If the file is vault encrypted return a path to a temporary decrypted file
If the file is not encrypted then the path is returned
        Temporary files are cleaned up later via cleanup_tmp_file()/cleanup_all_tmp_files()
"""
if not file_path or not isinstance(file_path, (binary_type, text_type)):
raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path))
b_file_path = to_bytes(file_path, errors='surrogate_or_strict')
if not self.path_exists(b_file_path) or not self.is_file(b_file_path):
raise AnsibleFileNotFound(file_name=file_path)
real_path = self.path_dwim(file_path)
try:
if decrypt:
with open(to_bytes(real_path), 'rb') as f:
# Limit how much of the file is read since we do not know
# whether this is a vault file and therefore it could be very
# large.
if is_encrypted_file(f):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
data = f.read()
if not self._vault.secrets:
raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path))
data = self._vault.decrypt(data, filename=real_path)
# Make a temp file
real_path = self._create_content_tempfile(data)
self._tempfiles.add(real_path)
return real_path
except (IOError, OSError) as e:
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e)
def cleanup_tmp_file(self, file_path: str) -> None:
"""
Removes any temporary files created from a previous call to
get_real_file. file_path must be the path returned from a
previous call to get_real_file.
"""
if file_path in self._tempfiles:
os.unlink(file_path)
self._tempfiles.remove(file_path)
def cleanup_all_tmp_files(self) -> None:
"""
Removes all temporary files that DataLoader has created
NOTE: not thread safe, forks also need special handling see __init__ for details.
"""
for f in list(self._tempfiles):
try:
self.cleanup_tmp_file(f)
except Exception as e:
display.warning("Unable to cleanup temp files: %s" % to_text(e))
def find_vars_files(self, path: str, name: str, extensions: list[str] | None = None, allow_dir: bool = True) -> list[str]:
"""
Find vars files in a given path with specified name. This will find
files in a dir named <name>/ or a file called <name> ending in known
extensions.
"""
b_path = to_bytes(os.path.join(path, name))
found = []
if extensions is None:
# Look for file with no extension first to find dir before file
extensions = [''] + C.YAML_FILENAME_EXTENSIONS
# add valid extensions to name
for ext in extensions:
if '.' in ext:
full_path = b_path + to_bytes(ext)
elif ext:
full_path = b'.'.join([b_path, to_bytes(ext)])
else:
full_path = b_path
if self.path_exists(full_path):
if self.is_directory(full_path):
if allow_dir:
found.extend(self._get_dir_vars_files(to_text(full_path), extensions))
else:
continue
else:
found.append(to_text(full_path))
break
return found
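    # Illustrative usage of find_vars_files — a minimal sketch; the path is
    # hypothetical:
    #
    #   dl.find_vars_files('/srv/play/group_vars', 'webservers')
    #   # checks 'webservers' (dir or extensionless file) first, then
    #   # webservers.yml / .yaml / .json; a matching directory is walked
    #   # recursively, a matching file ends the search.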
def _get_dir_vars_files(self, path: str, extensions: list[str]) -> list[str]:
found = []
for spath in sorted(self.list_directory(path)):
if not spath.startswith(u'.') and not spath.endswith(u'~'): # skip hidden and backups
ext = os.path.splitext(spath)[-1]
full_spath = os.path.join(path, spath)
if self.is_directory(full_spath) and not ext: # recursive search if dir
found.extend(self._get_dir_vars_files(full_spath, extensions))
elif self.is_file(full_spath) and (not ext or to_text(ext) in extensions):
# only consider files with valid extensions or no extension
found.append(full_spath)
return found
| 20,884
|
Python
|
.py
| 383
| 43.483029
| 147
| 0.619622
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,027
|
__init__.py
|
ansible_ansible/lib/ansible/parsing/__init__.py
|
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
| 742
|
Python
|
.py
| 17
| 42.588235
| 70
| 0.776243
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,028
|
quoting.py
|
ansible_ansible/lib/ansible/parsing/quoting.py
|
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
def is_quoted(data):
return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
def unquote(data):
""" removes first and last quotes from a string, if the string starts and ends with the same quotes """
if is_quoted(data):
return data[1:-1]
return data
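# Illustrative behaviour of unquote — a minimal sketch, not part of the
# original module:
#
#   unquote('"hello"')   # -> 'hello'
#   unquote("'hi'")      # -> 'hi'
#   unquote('hello')     # -> 'hello' (no surrounding quotes, unchanged)
#   unquote('"a\\"')     # -> unchanged: the closing quote is escaped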
| 1,057
|
Python
|
.py
| 24
| 41.833333
| 107
| 0.729572
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,029
|
mod_args.py
|
ansible_ansible/lib/ansible/parsing/mod_args.py
|
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import ansible.constants as C
from ansible.errors import AnsibleParserError, AnsibleError, AnsibleAssertionError
from ansible.module_utils.six import string_types
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text
from ansible.parsing.splitter import parse_kv, split_args
from ansible.plugins.loader import module_loader, action_loader
from ansible.template import Templar
from ansible.utils.fqcn import add_internal_fqcns
# modules formatted for user messages
_BUILTIN_RAW_PARAM_MODULES_SIMPLE = set([
'include_vars',
'include_tasks',
'include_role',
'import_tasks',
'import_role',
'add_host',
'group_by',
'set_fact',
'meta',
])
FREEFORM_ACTIONS_SIMPLE = set(C.MODULE_REQUIRE_ARGS_SIMPLE)
FREEFORM_ACTIONS = frozenset(C.MODULE_REQUIRE_ARGS)
RAW_PARAM_MODULES_SIMPLE = _BUILTIN_RAW_PARAM_MODULES_SIMPLE.union(FREEFORM_ACTIONS_SIMPLE)
# For filtering out modules correctly below, use all permutations
RAW_PARAM_MODULES = frozenset(add_internal_fqcns(RAW_PARAM_MODULES_SIMPLE)).union(FREEFORM_ACTIONS)
BUILTIN_TASKS = frozenset(add_internal_fqcns((
'meta',
'include_tasks',
'include_role',
'import_tasks',
'import_role'
)))
def _get_action_context(action_or_module, collection_list):
module_context = module_loader.find_plugin_with_context(action_or_module, collection_list=collection_list)
if module_context and module_context.resolved and module_context.action_plugin:
action_or_module = module_context.action_plugin
context = action_loader.find_plugin_with_context(action_or_module, collection_list=collection_list)
if not context or not context.resolved:
context = module_context
return context
class ModuleArgsParser:
"""
There are several ways a module and argument set can be expressed:
# legacy form (for a shell command)
- action: shell echo hi
# common shorthand for local actions vs delegate_to
- local_action: shell echo hi
# most commonly:
- copy: src=a dest=b
# legacy form
- action: copy src=a dest=b
# complex args form, for passing structured data
- copy:
src: a
dest: b
# gross, but technically legal
- action:
module: copy
args:
src: a
dest: b
# Standard YAML form for command-type modules. In this case, the args specified
# will act as 'defaults' and will be overridden by any args specified
# in one of the other formats (complex args under the action, or
    # parsed from the k=v string)
- command: 'pwd'
args:
chdir: '/tmp'
This class has some of the logic to canonicalize these into the form
- module: <module_name>
delegate_to: <optional>
args: <args>
Args may also be munged for certain shell command parameters.
"""
def __init__(self, task_ds=None, collection_list=None):
task_ds = {} if task_ds is None else task_ds
if not isinstance(task_ds, dict):
raise AnsibleAssertionError("the type of 'task_ds' should be a dict, but is a %s" % type(task_ds))
self._task_ds = task_ds
self._collection_list = collection_list
# delayed local imports to prevent circular import
from ansible.playbook.task import Task
from ansible.playbook.handler import Handler
# store the valid Task/Handler attrs for quick access
self._task_attrs = set(Task.fattributes)
self._task_attrs.update(set(Handler.fattributes))
# HACK: why are these not FieldAttributes on task with a post-validate to check usage?
self._task_attrs.update(['local_action', 'static'])
self._task_attrs = frozenset(self._task_attrs)
self.resolved_action = None
def _split_module_string(self, module_string):
"""
when module names are expressed like:
action: copy src=a dest=b
the first part of the string is the name of the module
and the rest are strings pertaining to the arguments.
"""
tokens = split_args(module_string)
if len(tokens) > 1:
return (tokens[0].strip(), " ".join(tokens[1:]))
else:
return (tokens[0].strip(), "")
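    # Illustrative behaviour of _split_module_string — a minimal sketch:
    #
    #   self._split_module_string('copy src=a dest=b')   # -> ('copy', 'src=a dest=b')
    #   self._split_module_string('ping')                # -> ('ping', '')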
def _normalize_parameters(self, thing, action=None, additional_args=None):
"""
arguments can be fuzzy. Deal with all the forms.
"""
additional_args = {} if additional_args is None else additional_args
# final args are the ones we'll eventually return, so first update
# them with any additional args specified, which have lower priority
# than those which may be parsed/normalized next
final_args = dict()
if additional_args:
if isinstance(additional_args, string_types):
templar = Templar(loader=None)
if templar.is_template(additional_args):
final_args['_variable_params'] = additional_args
else:
raise AnsibleParserError("Complex args containing variables cannot use bare variables (without Jinja2 delimiters), "
"and must use the full variable style ('{{var_name}}')")
elif isinstance(additional_args, dict):
final_args.update(additional_args)
else:
raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's a 'new style' invocation.
# otherwise, it's not
if action is not None:
args = self._normalize_new_style_args(thing, action)
else:
(action, args) = self._normalize_old_style_args(thing)
# this can occasionally happen, simplify
if args and 'args' in args:
tmp_args = args.pop('args')
if isinstance(tmp_args, string_types):
tmp_args = parse_kv(tmp_args)
args.update(tmp_args)
# only internal variables can start with an underscore, so
# we don't allow users to set them directly in arguments
if args and action not in FREEFORM_ACTIONS:
for arg in args:
arg = to_text(arg)
if arg.startswith('_ansible_'):
err_msg = (
f"Invalid parameter specified beginning with keyword '_ansible_' for action '{action !s}': '{arg !s}'. "
"The prefix '_ansible_' is reserved for internal use only."
)
raise AnsibleError(err_msg)
# finally, update the args we're going to return with the ones
# which were normalized above
if args:
final_args.update(args)
return (action, final_args)
def _normalize_new_style_args(self, thing, action):
"""
deals with fuzziness in new style module invocations
accepting key=value pairs and dictionaries, and returns
a dictionary of arguments
possible example inputs:
'echo hi', 'shell'
{'region': 'xyz'}, 'ec2'
standardized outputs like:
{ _raw_params: 'echo hi', _uses_shell: True }
"""
if isinstance(thing, dict):
# form is like: { xyz: { x: 2, y: 3 } }
args = thing
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b
check_raw = action in FREEFORM_ACTIONS
args = parse_kv(thing, check_raw=check_raw)
elif thing is None:
# this can happen with modules which take no params, like ping:
args = None
else:
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return args
def _normalize_old_style_args(self, thing):
"""
deals with fuzziness in old-style (action/local_action) module invocations
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'shell' : 'echo hi' }
'shell echo hi'
{'module': 'ec2', 'x': 1 }
standardized outputs like:
('ec2', { 'x': 1} )
"""
action = None
args = None
if isinstance(thing, dict):
# form is like: action: { module: 'copy', src: 'a', dest: 'b' }
thing = thing.copy()
if 'module' in thing:
action, module_args = self._split_module_string(thing['module'])
args = thing.copy()
check_raw = action in FREEFORM_ACTIONS
args.update(parse_kv(module_args, check_raw=check_raw))
del args['module']
elif isinstance(thing, string_types):
# form is like: action: copy src=a dest=b
(action, args) = self._split_module_string(thing)
check_raw = action in FREEFORM_ACTIONS
args = parse_kv(args, check_raw=check_raw)
else:
# need a dict or a string, so giving up
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return (action, args)
def parse(self, skip_action_validation=False):
"""
        Given a task in one of the supported forms, parses and returns
        the action, arguments, and delegate_to values for the
task, dealing with all sorts of levels of fuzziness.
"""
thing = None
action = None
delegate_to = self._task_ds.get('delegate_to', Sentinel)
args = dict()
# This is the standard YAML form for command-type modules. We grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
additional_args = self._task_ds.get('args', dict())
# We can have one of action, local_action, or module specified
# action
if 'action' in self._task_ds:
# an old school 'action' statement
thing = self._task_ds['action']
action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
# local_action
if 'local_action' in self._task_ds:
# local_action is similar but also implies a delegate_to
if action is not None:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
delegate_to = 'localhost'
action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
if action is not None and not skip_action_validation:
context = _get_action_context(action, self._collection_list)
if context is not None and context.resolved:
self.resolved_action = context.resolved_fqcn
# module: <stuff> is the more new-style invocation
# filter out task attributes so we're only querying unrecognized keys as actions/modules
non_task_ds = dict((k, v) for k, v in self._task_ds.items() if (k not in self._task_attrs) and (not k.startswith('with_')))
# walk the filtered input dictionary to see if we recognize a module name
for item, value in non_task_ds.items():
context = None
is_action_candidate = False
if item in BUILTIN_TASKS:
is_action_candidate = True
elif skip_action_validation:
is_action_candidate = True
else:
try:
context = _get_action_context(item, self._collection_list)
except AnsibleError as e:
if e.obj is None:
e.obj = self._task_ds
raise e
is_action_candidate = context.resolved and bool(context.redirect_list)
if is_action_candidate:
# finding more than one module name is a problem
if action is not None:
raise AnsibleParserError("conflicting action statements: %s, %s" % (action, item), obj=self._task_ds)
if context is not None and context.resolved:
self.resolved_action = context.resolved_fqcn
action = item
thing = value
action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
# if we didn't see any module in the task at all, it's not a task really
if action is None:
if non_task_ds: # there was one non-task action, but we couldn't find it
bad_action = list(non_task_ds.keys())[0]
raise AnsibleParserError("couldn't resolve module/action '{0}'. This often indicates a "
"misspelling, missing collection, or incorrect module path.".format(bad_action),
obj=self._task_ds)
else:
raise AnsibleParserError("no module/action detected in task.",
obj=self._task_ds)
elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
templar = Templar(loader=None)
raw_params = args.pop('_raw_params')
if templar.is_template(raw_params):
args['_variable_params'] = raw_params
else:
raise AnsibleParserError(
"this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES_SIMPLE)),
obj=self._task_ds)
return (action, args, delegate_to)
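# Illustrative usage of ModuleArgsParser — a minimal sketch assuming the plugin
# loader can resolve 'copy'; the task dict mirrors parsed playbook YAML:
#
#   parser = ModuleArgsParser(task_ds={'copy': 'src=a dest=b', 'delegate_to': 'host1'})
#   action, args, delegate_to = parser.parse()
#   # -> ('copy', {'src': 'a', 'dest': 'b'}, 'host1')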
| 14,742
|
Python
|
.py
| 305
| 38.039344
| 154
| 0.623122
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,030
|
constructor.py
|
ansible_ansible/lib/ansible/parsing/yaml/constructor.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from yaml.constructor import SafeConstructor, ConstructorError
from yaml.nodes import MappingNode
from ansible import constants as C
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode, AnsibleVaultEncryptedUnicode
from ansible.parsing.vault import VaultLib
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import wrap_var
display = Display()
class AnsibleConstructor(SafeConstructor):
def __init__(self, file_name=None, vault_secrets=None):
self._ansible_file_name = file_name
super(AnsibleConstructor, self).__init__()
self._vaults = {}
self.vault_secrets = vault_secrets or []
self._vaults['default'] = VaultLib(secrets=self.vault_secrets)
def construct_yaml_map(self, node):
data = AnsibleMapping()
yield data
value = self.construct_mapping(node)
data.update(value)
data.ansible_pos = self._node_position_info(node)
def construct_mapping(self, node, deep=False):
# Most of this is from yaml.constructor.SafeConstructor. We replicate
# it here so that we can warn users when they have duplicate dict keys
# (pyyaml silently allows overwriting keys)
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
self.flatten_mapping(node)
mapping = AnsibleMapping()
# Add our extra information to the returned value
mapping.ansible_pos = self._node_position_info(node)
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unacceptable key (%s)" % exc, key_node.start_mark)
if key in mapping:
msg = (u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).'
u' Using last defined value only.'.format(key, *mapping.ansible_pos))
if C.DUPLICATE_YAML_DICT_KEY == 'warn':
display.warning(msg)
elif C.DUPLICATE_YAML_DICT_KEY == 'error':
raise ConstructorError(context=None, context_mark=None,
problem=to_native(msg),
problem_mark=node.start_mark,
note=None)
else:
# when 'ignore'
display.debug(msg)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
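    # Illustrative behaviour of the duplicate-key handling — a minimal sketch:
    # loading 'a: 1\na: 2' yields {'a': 2} plus a warning when
    # DUPLICATE_YAML_DICT_KEY is 'warn' (the default); 'error' raises a
    # ConstructorError and 'ignore' only emits a debug message.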
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
value = self.construct_scalar(node)
ret = AnsibleUnicode(value)
ret.ansible_pos = self._node_position_info(node)
return ret
def construct_vault_encrypted_unicode(self, node):
value = self.construct_scalar(node)
b_ciphertext_data = to_bytes(value)
# could pass in a key id here to choose the vault to associate with
# TODO/FIXME: plugin vault selector
vault = self._vaults['default']
if vault.secrets is None:
raise ConstructorError(context=None, context_mark=None,
problem="found !vault but no vault password provided",
problem_mark=node.start_mark,
note=None)
ret = AnsibleVaultEncryptedUnicode(b_ciphertext_data)
ret.vault = vault
ret.ansible_pos = self._node_position_info(node)
return ret
def construct_yaml_seq(self, node):
data = AnsibleSequence()
yield data
data.extend(self.construct_sequence(node))
data.ansible_pos = self._node_position_info(node)
def construct_yaml_unsafe(self, node):
try:
constructor = getattr(node, 'id', 'object')
if constructor is not None:
constructor = getattr(self, 'construct_%s' % constructor)
except AttributeError:
constructor = self.construct_object
value = constructor(node)
return wrap_var(value)
def _node_position_info(self, node):
# the line number where the previous token has ended (plus empty lines)
# Add one so that the first line is line 1 rather than line 0
column = node.start_mark.column + 1
line = node.start_mark.line + 1
# in some cases, we may have pre-read the data and then
# passed it to the load() call for YAML, in which case we
# want to override the default datasource (which would be
# '<string>') to the actual filename we read in
datasource = self._ansible_file_name or node.start_mark.name
return (datasource, line, column)
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:map',
AnsibleConstructor.construct_yaml_map) # type: ignore[type-var]
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:python/dict',
AnsibleConstructor.construct_yaml_map) # type: ignore[type-var]
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:str',
AnsibleConstructor.construct_yaml_str) # type: ignore[type-var]
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:python/unicode',
AnsibleConstructor.construct_yaml_str) # type: ignore[type-var]
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:seq',
AnsibleConstructor.construct_yaml_seq) # type: ignore[type-var]
AnsibleConstructor.add_constructor(
u'!unsafe',
AnsibleConstructor.construct_yaml_unsafe) # type: ignore[type-var]
AnsibleConstructor.add_constructor(
u'!vault',
AnsibleConstructor.construct_vault_encrypted_unicode) # type: ignore[type-var]
AnsibleConstructor.add_constructor(
u'!vault-encrypted',
AnsibleConstructor.construct_vault_encrypted_unicode) # type: ignore[type-var]
| 7,143
|
Python
|
.py
| 145
| 39.634483
| 120
| 0.656569
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,031
|
loader.py
|
ansible_ansible/lib/ansible/parsing/yaml/loader.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from yaml.resolver import Resolver
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.module_utils.common.yaml import HAS_LIBYAML, Parser
if HAS_LIBYAML:
class AnsibleLoader(Parser, AnsibleConstructor, Resolver): # type: ignore[misc] # pylint: disable=inconsistent-mro
def __init__(self, stream, file_name=None, vault_secrets=None):
Parser.__init__(self, stream)
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
else:
from yaml.composer import Composer
from yaml.reader import Reader
from yaml.scanner import Scanner
class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver): # type: ignore[misc,no-redef] # pylint: disable=inconsistent-mro
def __init__(self, stream, file_name=None, vault_secrets=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
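# Illustrative usage of AnsibleLoader — a minimal sketch, not part of the
# original module:
#
#   loader = AnsibleLoader('key: value', file_name='inline.yml')
#   data = loader.get_single_data()
#   # -> AnsibleMapping({'key': 'value'}) with data.ansible_pos == ('inline.yml', 1, 1)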
| 1,924
|
Python
|
.py
| 38
| 45.710526
| 156
| 0.722488
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,032
|
dumper.py
|
ansible_ansible/lib/ansible/parsing/yaml/dumper.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import yaml
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common.yaml import SafeDumper
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes, NativeJinjaUnsafeText, NativeJinjaText
from ansible.template import AnsibleUndefined
from ansible.vars.hostvars import HostVars, HostVarsVars
from ansible.vars.manager import VarsWithSources
class AnsibleDumper(SafeDumper):
"""
A simple stub class that allows us to add representers
for our overridden object types.
"""
def represent_hostvars(self, data):
return self.represent_dict(dict(data))
# Note: only want to represent the encrypted data
def represent_vault_encrypted_unicode(self, data):
return self.represent_scalar(u'!vault', data._ciphertext.decode(), style='|')
def represent_unicode(self, data):
return yaml.representer.SafeRepresenter.represent_str(self, text_type(data))
def represent_binary(self, data):
return yaml.representer.SafeRepresenter.represent_binary(self, binary_type(data))
def represent_undefined(self, data):
# Here bool will ensure _fail_with_undefined_error happens
# if the value is Undefined.
# This happens because Jinja sets __bool__ on StrictUndefined
return bool(data)
AnsibleDumper.add_representer(
AnsibleUnicode,
represent_unicode,
)
AnsibleDumper.add_representer(
AnsibleUnsafeText,
represent_unicode,
)
AnsibleDumper.add_representer(
AnsibleUnsafeBytes,
represent_binary,
)
AnsibleDumper.add_representer(
HostVars,
represent_hostvars,
)
AnsibleDumper.add_representer(
HostVarsVars,
represent_hostvars,
)
AnsibleDumper.add_representer(
VarsWithSources,
represent_hostvars,
)
AnsibleDumper.add_representer(
AnsibleSequence,
yaml.representer.SafeRepresenter.represent_list,
)
AnsibleDumper.add_representer(
AnsibleMapping,
yaml.representer.SafeRepresenter.represent_dict,
)
AnsibleDumper.add_representer(
AnsibleVaultEncryptedUnicode,
represent_vault_encrypted_unicode,
)
AnsibleDumper.add_representer(
AnsibleUndefined,
represent_undefined,
)
AnsibleDumper.add_representer(
NativeJinjaUnsafeText,
represent_unicode,
)
AnsibleDumper.add_representer(
NativeJinjaText,
represent_unicode,
)
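# Illustrative round-trip with AnsibleDumper — a minimal sketch, not part of
# the original module:
#
#   yaml.dump({'x': AnsibleUnicode('hi')}, Dumper=AnsibleDumper)
#   # -> 'x: hi\n' (the tagged subclass is emitted as a plain string)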
| 3,173
|
Python
|
.py
| 92
| 31.619565
| 118
| 0.798559
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,033
|
__init__.py
|
ansible_ansible/lib/ansible/parsing/yaml/__init__.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
| 749
|
Python
|
.py
| 17
| 43
| 70
| 0.77565
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,034
|
objects.py
|
ansible_ansible/lib/ansible/parsing/yaml/objects.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import sys as _sys
from collections.abc import Sequence
from ansible.module_utils.six import text_type
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
class AnsibleBaseYAMLObject(object):
"""
the base class used to sub-class python built-in objects
so that we can add attributes to them during yaml parsing
"""
_data_source = None
_line_number = 0
_column_number = 0
def _get_ansible_position(self):
return (self._data_source, self._line_number, self._column_number)
def _set_ansible_position(self, obj):
try:
(src, line, col) = obj
except (TypeError, ValueError):
raise AssertionError(
'ansible_pos can only be set with a tuple/list '
'of three values: source, line number, column number'
)
self._data_source = src
self._line_number = line
self._column_number = col
ansible_pos = property(_get_ansible_position, _set_ansible_position)
class AnsibleMapping(AnsibleBaseYAMLObject, dict):
""" sub class for dictionaries """
pass
class AnsibleUnicode(AnsibleBaseYAMLObject, text_type):
""" sub class for unicode objects """
pass
class AnsibleSequence(AnsibleBaseYAMLObject, list):
""" sub class for lists """
pass
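# Illustrative usage of ansible_pos — a minimal sketch, not part of the
# original module:
#
#   s = AnsibleUnicode('hi')
#   s.ansible_pos = ('play.yml', 3, 7)   # (source, line, column)
#   s.ansible_pos                        # -> ('play.yml', 3, 7)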
class AnsibleVaultEncryptedUnicode(Sequence, AnsibleBaseYAMLObject):
"""Unicode like object that is not evaluated (decrypted) until it needs to be"""
__UNSAFE__ = True
__ENCRYPTED__ = True
yaml_tag = u'!vault'
@classmethod
def from_plaintext(cls, seq, vault, secret):
        if not vault:
            # a falsy vault cannot supply an error class, so raise a plain ValueError
            raise ValueError('Error creating AnsibleVaultEncryptedUnicode, invalid vault (%s) provided' % vault)
ciphertext = vault.encrypt(seq, secret)
avu = cls(ciphertext)
avu.vault = vault
return avu
def __init__(self, ciphertext):
"""A AnsibleUnicode with a Vault attribute that can decrypt it.
ciphertext is a byte string (str on PY2, bytestring on PY3).
The .data attribute is a property that returns the decrypted plaintext
of the ciphertext as a PY2 unicode or PY3 string object.
"""
super(AnsibleVaultEncryptedUnicode, self).__init__()
# after construction, calling code has to set the .vault attribute to a vaultlib object
self.vault = None
self._ciphertext = to_bytes(ciphertext)
@property
def data(self):
if not self.vault:
return to_text(self._ciphertext)
return to_text(self.vault.decrypt(self._ciphertext, obj=self))
@data.setter
def data(self, value):
self._ciphertext = to_bytes(value)
def is_encrypted(self):
return self.vault and self.vault.is_encrypted(self._ciphertext)
def __eq__(self, other):
if self.vault:
return other == self.data
return False
def __ne__(self, other):
if self.vault:
return other != self.data
return True
def __reversed__(self):
        # This gets inherited from ``collections.abc.Sequence`` which returns a generator;
# make this act more like the string implementation
return to_text(self[::-1], errors='surrogate_or_strict')
def __str__(self):
return to_native(self.data, errors='surrogate_or_strict')
def __unicode__(self):
return to_text(self.data, errors='surrogate_or_strict')
def encode(self, encoding=None, errors=None):
return to_bytes(self.data, encoding=encoding, errors=errors)
# Methods below are a copy from ``collections.UserString``
# Some are copied as is, where others are modified to not
# auto wrap with ``self.__class__``
def __repr__(self):
return repr(self.data)
def __int__(self, base=10):
return int(self.data, base=base)
def __float__(self):
return float(self.data)
def __complex__(self):
return complex(self.data)
def __hash__(self):
return hash(self.data)
# This breaks vault, do not define it, we cannot satisfy this
# def __getnewargs__(self):
# return (self.data[:],)
def __lt__(self, string):
if isinstance(string, AnsibleVaultEncryptedUnicode):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, AnsibleVaultEncryptedUnicode):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, AnsibleVaultEncryptedUnicode):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, AnsibleVaultEncryptedUnicode):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, AnsibleVaultEncryptedUnicode):
char = char.data
return char in self.data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def __getslice__(self, start, end):
start = max(start, 0)
end = max(end, 0)
return self.data[start:end]
def __add__(self, other):
if isinstance(other, AnsibleVaultEncryptedUnicode):
return self.data + other.data
elif isinstance(other, text_type):
return self.data + other
return self.data + to_text(other)
def __radd__(self, other):
if isinstance(other, text_type):
return other + self.data
return to_text(other) + self.data
def __mul__(self, n):
return self.data * n
__rmul__ = __mul__
def __mod__(self, args):
return self.data % args
def __rmod__(self, template):
return to_text(template) % self
# the following methods are defined in alphabetical order:
def capitalize(self):
return self.data.capitalize()
def casefold(self):
return self.data.casefold()
def center(self, width, *args):
return self.data.center(width, *args)
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, AnsibleVaultEncryptedUnicode):
sub = sub.data
return self.data.count(sub, start, end)
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.data.expandtabs(tabsize)
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, AnsibleVaultEncryptedUnicode):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def format_map(self, mapping):
return self.data.format_map(mapping)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self):
return self.data.isalpha()
def isalnum(self):
return self.data.isalnum()
def isascii(self):
return self.data.isascii()
def isdecimal(self):
return self.data.isdecimal()
def isdigit(self):
return self.data.isdigit()
def isidentifier(self):
return self.data.isidentifier()
def islower(self):
return self.data.islower()
def isnumeric(self):
return self.data.isnumeric()
def isprintable(self):
return self.data.isprintable()
def isspace(self):
return self.data.isspace()
def istitle(self):
return self.data.istitle()
def isupper(self):
return self.data.isupper()
def join(self, seq):
return self.data.join(seq)
def ljust(self, width, *args):
return self.data.ljust(width, *args)
def lower(self):
return self.data.lower()
def lstrip(self, chars=None):
return self.data.lstrip(chars)
maketrans = str.maketrans
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, AnsibleVaultEncryptedUnicode):
old = old.data
if isinstance(new, AnsibleVaultEncryptedUnicode):
new = new.data
return self.data.replace(old, new, maxsplit)
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, AnsibleVaultEncryptedUnicode):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.data.rjust(width, *args)
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.data.rstrip(chars)
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=False):
return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None):
return self.data.strip(chars)
def swapcase(self):
return self.data.swapcase()
def title(self):
return self.data.title()
def translate(self, *args):
return self.data.translate(*args)
def upper(self):
return self.data.upper()
def zfill(self, width):
return self.data.zfill(width)
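# Illustrative sketch of how this class is typically wired up; the vault and
# secret objects come from ansible.parsing.vault, and the password value used
# here is hypothetical:
#
#   from ansible.parsing.vault import VaultLib, VaultSecret
#   secret = VaultSecret(_bytes=b'hunter2')
#   vl = VaultLib(secrets=[('default', secret)])
#   avu = AnsibleVaultEncryptedUnicode.from_plaintext('hello', vl, secret)
#   str(avu)  # decrypts lazily via the .data property -> 'hello'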
| 10,456 | Python | .py | 262 | 32.553435 | 125 | 0.654452 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,035 | __init__.py | ansible_ansible/lib/ansible/parsing/vault/__init__.py |
# (c) 2014, James Tanner <tanner.jc@gmail.com>
# (c) 2016, Adrian Likins <alikins@redhat.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import errno
import fcntl
import os
import random
import shlex
import shutil
import subprocess
import sys
import tempfile
import warnings
from binascii import hexlify
from binascii import unhexlify
from binascii import Error as BinasciiError
HAS_CRYPTOGRAPHY = False
CRYPTOGRAPHY_BACKEND = None
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, padding
from cryptography.hazmat.primitives.hmac import HMAC
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.ciphers import (
Cipher as C_Cipher, algorithms, modes
)
CRYPTOGRAPHY_BACKEND = default_backend()
HAS_CRYPTOGRAPHY = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible import constants as C
from ansible.module_utils.six import binary_type
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible.utils.display import Display
from ansible.utils.path import makedirs_safe, unfrackpath
display = Display()
b_HEADER = b'$ANSIBLE_VAULT'
CIPHER_ALLOWLIST = frozenset((u'AES256',))
CIPHER_WRITE_ALLOWLIST = frozenset((u'AES256',))
# See also CIPHER_MAPPING at the bottom of the file which maps cipher strings
# (used in VaultFile header) to a cipher class
NEED_CRYPTO_LIBRARY = "ansible-vault requires the cryptography library in order to function"
class AnsibleVaultError(AnsibleError):
pass
class AnsibleVaultPasswordError(AnsibleVaultError):
pass
class AnsibleVaultFormatError(AnsibleError):
pass
def is_encrypted(data):
""" Test if this is vault encrypted data blob
:arg data: a byte or text string to test whether it is recognized as vault
encrypted data
:returns: True if it is recognized. Otherwise, False.
"""
try:
# Make sure we have a byte string and that it only contains ascii
# bytes.
b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict')
except (UnicodeError, TypeError):
# The vault format is pure ascii so if we failed to encode to bytes
# via ascii we know that this is not vault data.
# Similarly, if it's not a string, it's not vault data
return False
if b_data.startswith(b_HEADER):
return True
return False
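# For example, detection is purely header-based:
#
#   is_encrypted(b'$ANSIBLE_VAULT;1.1;AES256\n6135...')  # True
#   is_encrypted(u'just some plain text')                # False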
def is_encrypted_file(file_obj, start_pos=0, count=len(b_HEADER)):
"""Test if the contents of a file obj are a vault encrypted data blob.
:arg file_obj: A file object that will be read from.
:kwarg start_pos: A byte offset in the file to start reading the header
from. Defaults to 0, the beginning of the file.
:kwarg count: Read up to this number of bytes from the file to determine
        if it looks like encrypted vault data. The default is the size of
        the vault header, which is what is needed most times.
For some IO classes, or files that don't begin with the vault itself,
set to -1 to read to the end of file.
:returns: True if the file looks like a vault file. Otherwise, False.
"""
# read the header and reset the file stream to where it started
current_position = file_obj.tell()
try:
file_obj.seek(start_pos)
return is_encrypted(file_obj.read(count))
finally:
file_obj.seek(current_position)
def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
b_tmpdata = b_vaulttext_envelope.splitlines()
b_tmpheader = b_tmpdata[0].strip().split(b';')
b_version = b_tmpheader[1].strip()
cipher_name = to_text(b_tmpheader[2].strip())
vault_id = default_vault_id
# Only attempt to find vault_id if the vault file is version 1.2 or newer
# if self.b_version == b'1.2':
if len(b_tmpheader) >= 4:
vault_id = to_text(b_tmpheader[3].strip())
b_ciphertext = b''.join(b_tmpdata[1:])
return b_ciphertext, b_version, cipher_name, vault_id
def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filename=None):
"""Parse the vaulttext envelope
When data is saved, it has a header prepended and is formatted into 80
character lines. This method extracts the information from the header
and then removes the header and the inserted newlines. The string returned
is suitable for processing by the Cipher classes.
    :arg b_vaulttext_envelope: byte str containing the data from a save file
    :kwarg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
    :kwarg filename: The filename that the data came from. This is only
        used to make better error messages in case the data cannot be
        decrypted. This is optional.
    :returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaulttext,
a byte str of the vault format version,
the name of the cipher used, and the vault_id.
:raises: AnsibleVaultFormatError: if the vaulttext_envelope format is invalid
"""
# used by decrypt
default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY
try:
return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id)
except Exception as exc:
msg = "Vault envelope format error"
if filename:
msg += ' in %s' % (filename)
msg += ': %s' % exc
raise AnsibleVaultFormatError(msg)
def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None):
""" Add header and format to 80 columns
:arg b_ciphertext: the encrypted and hexlified data as a byte string
:arg cipher_name: unicode cipher name (for ex, u'AES256')
:arg version: unicode vault version (for ex, '1.2'). Optional ('1.1' is default)
:arg vault_id: unicode vault identifier. If provided, the version will be bumped to 1.2.
:returns: a byte str that should be dumped into a file. It's
formatted to 80 char columns and has the header prepended
"""
if not cipher_name:
raise AnsibleError("the cipher must be set before adding a header")
version = version or '1.1'
# If we specify a vault_id, use format version 1.2. For no vault_id, stick to 1.1
if vault_id and vault_id != u'default':
version = '1.2'
b_version = to_bytes(version, 'utf-8', errors='strict')
b_vault_id = to_bytes(vault_id, 'utf-8', errors='strict')
b_cipher_name = to_bytes(cipher_name, 'utf-8', errors='strict')
header_parts = [b_HEADER,
b_version,
b_cipher_name]
if b_version == b'1.2' and b_vault_id:
header_parts.append(b_vault_id)
header = b';'.join(header_parts)
b_vaulttext = [header]
b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)]
b_vaulttext += [b'']
b_vaulttext = b'\n'.join(b_vaulttext)
return b_vaulttext
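# The envelope produced above has this shape (hex body wrapped at 80 columns,
# ciphertext shortened here for illustration):
#
#   $ANSIBLE_VAULT;1.1;AES256
#   6533386561346362...
#   ...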
def _unhexlify(b_data):
try:
return unhexlify(b_data)
except (BinasciiError, TypeError) as exc:
raise AnsibleVaultFormatError('Vault format unhexlify error: %s' % exc)
def _parse_vaulttext(b_vaulttext):
b_vaulttext = _unhexlify(b_vaulttext)
b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b"\n", 2)
b_salt = _unhexlify(b_salt)
b_ciphertext = _unhexlify(b_ciphertext)
return b_ciphertext, b_salt, b_crypted_hmac
def parse_vaulttext(b_vaulttext):
"""Parse the vaulttext
:arg b_vaulttext: byte str containing the vaulttext (ciphertext, salt, crypted_hmac)
:returns: A tuple of byte str of the ciphertext suitable for passing to a
Cipher class's decrypt() function, a byte str of the salt,
and a byte str of the crypted_hmac
:raises: AnsibleVaultFormatError: if the vaulttext format is invalid
"""
# SPLIT SALT, DIGEST, AND DATA
try:
return _parse_vaulttext(b_vaulttext)
except AnsibleVaultFormatError:
raise
except Exception as exc:
msg = "Vault vaulttext format error: %s" % exc
raise AnsibleVaultFormatError(msg)
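# After unhexlifying the envelope body, the payload is three newline-separated
# hex fields, so a round trip looks like:
#
#   b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_payload)
#
# where b_payload is the hexlified 'salt\nhmac\nciphertext' blob produced by
# VaultAES256.encrypt() below.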
def verify_secret_is_not_empty(secret, msg=None):
"""Check the secret against minimal requirements.
Raises: AnsibleVaultPasswordError if the password does not meet requirements.
Currently, only requirement is that the password is not None or an empty string.
"""
msg = msg or 'Invalid vault password was provided'
if not secret:
raise AnsibleVaultPasswordError(msg)
class VaultSecret:
"""Opaque/abstract objects for a single vault secret. ie, a password or a key."""
def __init__(self, _bytes=None):
# FIXME: ? that seems wrong... Unset etc?
self._bytes = _bytes
@property
def bytes(self):
"""The secret as a bytestring.
Sub classes that store text types will need to override to encode the text to bytes.
"""
return self._bytes
def load(self):
return self._bytes
class PromptVaultSecret(VaultSecret):
default_prompt_formats = ["Vault password (%s): "]
def __init__(self, _bytes=None, vault_id=None, prompt_formats=None):
super(PromptVaultSecret, self).__init__(_bytes=_bytes)
self.vault_id = vault_id
if prompt_formats is None:
self.prompt_formats = self.default_prompt_formats
else:
self.prompt_formats = prompt_formats
@property
def bytes(self):
return self._bytes
def load(self):
self._bytes = self.ask_vault_passwords()
def ask_vault_passwords(self):
b_vault_passwords = []
for prompt_format in self.prompt_formats:
prompt = prompt_format % {'vault_id': self.vault_id}
try:
vault_pass = display.prompt(prompt, private=True)
except EOFError:
raise AnsibleVaultError('EOFError (ctrl-d) on prompt for (%s)' % self.vault_id)
verify_secret_is_not_empty(vault_pass)
b_vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
b_vault_passwords.append(b_vault_pass)
# Make sure the passwords match by comparing them all to the first password
for b_vault_password in b_vault_passwords:
self.confirm(b_vault_passwords[0], b_vault_password)
if b_vault_passwords:
return b_vault_passwords[0]
return None
def confirm(self, b_vault_pass_1, b_vault_pass_2):
# enforce no newline chars at the end of passwords
if b_vault_pass_1 != b_vault_pass_2:
# FIXME: more specific exception
raise AnsibleError("Passwords do not match")
def script_is_client(filename):
"""Determine if a vault secret script is a client script that can be given --vault-id args"""
# if password script is 'something-client' or 'something-client.[sh|py|rb|etc]'
# script_name can still have '.' or could be entire filename if there is no ext
script_name, dummy = os.path.splitext(filename)
# TODO: for now, this is entirely based on filename
if script_name.endswith('-client'):
return True
return False
def get_file_vault_secret(filename=None, vault_id=None, encoding=None, loader=None):
""" Get secret from file content or execute file and get secret from stdout """
# we unfrack but not follow the full path/context to possible vault script
# so when the script uses 'adjacent' file for configuration or similar
# it still works (as inventory scripts often also do).
# while files from --vault-password-file are already unfracked, other sources are not
this_path = unfrackpath(filename, follow=False)
if not os.path.exists(this_path):
raise AnsibleError("The vault password file %s was not found" % this_path)
if os.path.isdir(this_path):
raise AnsibleError(f"The vault password file provided '{this_path}' can not be a directory")
# it is a script?
if loader.is_executable(this_path):
if script_is_client(filename):
# this is special script type that handles vault ids
display.vvvv(u'The vault password file %s is a client script.' % to_text(this_path))
# TODO: pass vault_id_name to script via cli
return ClientScriptVaultSecret(filename=this_path, vault_id=vault_id, encoding=encoding, loader=loader)
# just a plain vault password script. No args, returns a byte array
return ScriptVaultSecret(filename=this_path, encoding=encoding, loader=loader)
return FileVaultSecret(filename=this_path, encoding=encoding, loader=loader)
# TODO: mv these classes to a separate file so we don't pollute vault with 'subprocess' etc
class FileVaultSecret(VaultSecret):
def __init__(self, filename=None, encoding=None, loader=None):
super(FileVaultSecret, self).__init__()
self.filename = filename
self.loader = loader
self.encoding = encoding or 'utf8'
# We could load from file here, but that is eventually a pain to test
self._bytes = None
self._text = None
@property
def bytes(self):
if self._bytes:
return self._bytes
if self._text:
return self._text.encode(self.encoding)
return None
def load(self):
self._bytes = self._read_file(self.filename)
def _read_file(self, filename):
"""
        Read a vault password from a file, or if the file is executable,
        execute it and retrieve the password from stdout
"""
# TODO: replace with use of self.loader
try:
with open(filename, "rb") as f:
vault_pass = f.read().strip()
except (OSError, IOError) as e:
raise AnsibleError("Could not read vault password file %s: %s" % (filename, e))
b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass, filename)
vault_pass = b_vault_data.strip(b'\r\n')
verify_secret_is_not_empty(vault_pass,
msg='Invalid vault password was provided from file (%s)' % filename)
return vault_pass
def __repr__(self):
if self.filename:
return "%s(filename='%s')" % (self.__class__.__name__, self.filename)
return "%s()" % (self.__class__.__name__)
class ScriptVaultSecret(FileVaultSecret):
def _read_file(self, filename):
if not self.loader.is_executable(filename):
raise AnsibleVaultError("The vault password script %s was not executable" % filename)
command = self._build_command()
stdout, stderr, p = self._run(command)
self._check_results(stdout, stderr, p)
vault_pass = stdout.strip(b'\r\n')
empty_password_msg = 'Invalid vault password was provided from script (%s)' % filename
verify_secret_is_not_empty(vault_pass, msg=empty_password_msg)
return vault_pass
def _run(self, command):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(command, stdout=subprocess.PIPE)
except OSError as e:
msg_format = "Problem running vault password script %s (%s)." \
" If this is not a script, remove the executable bit from the file."
msg = msg_format % (self.filename, e)
raise AnsibleError(msg)
stdout, stderr = p.communicate()
return stdout, stderr, p
def _check_results(self, stdout, stderr, popen):
if popen.returncode != 0:
raise AnsibleError("Vault password script %s returned non-zero (%s): %s" %
(self.filename, popen.returncode, stderr))
def _build_command(self):
return [self.filename]
class ClientScriptVaultSecret(ScriptVaultSecret):
VAULT_ID_UNKNOWN_RC = 2
def __init__(self, filename=None, encoding=None, loader=None, vault_id=None):
super(ClientScriptVaultSecret, self).__init__(filename=filename,
encoding=encoding,
loader=loader)
self._vault_id = vault_id
display.vvvv(u'Executing vault password client script: %s --vault-id %s' % (to_text(filename), to_text(vault_id)))
def _run(self, command):
try:
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
msg_format = "Problem running vault password client script %s (%s)." \
" If this is not a script, remove the executable bit from the file."
msg = msg_format % (self.filename, e)
raise AnsibleError(msg)
stdout, stderr = p.communicate()
return stdout, stderr, p
def _check_results(self, stdout, stderr, popen):
if popen.returncode == self.VAULT_ID_UNKNOWN_RC:
raise AnsibleError('Vault password client script %s did not find a secret for vault-id=%s: %s' %
(self.filename, self._vault_id, stderr))
if popen.returncode != 0:
raise AnsibleError("Vault password client script %s returned non-zero (%s) when getting secret for vault-id=%s: %s" %
(self.filename, popen.returncode, self._vault_id, stderr))
def _build_command(self):
command = [self.filename]
if self._vault_id:
command.extend(['--vault-id', self._vault_id])
return command
def __repr__(self):
if self.filename:
return "%s(filename='%s', vault_id='%s')" % \
(self.__class__.__name__, self.filename, self._vault_id)
return "%s()" % (self.__class__.__name__)
def match_secrets(secrets, target_vault_ids):
"""Find all VaultSecret objects that are mapped to any of the target_vault_ids in secrets"""
if not secrets:
return []
matches = [(vault_id, secret) for vault_id, secret in secrets if vault_id in target_vault_ids]
return matches
def match_best_secret(secrets, target_vault_ids):
"""Find the best secret from secrets that matches target_vault_ids
Since secrets should be ordered so the early secrets are 'better' than later ones, this
just finds all the matches, then returns the first secret"""
matches = match_secrets(secrets, target_vault_ids)
if matches:
return matches[0]
# raise exception?
return None
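# Sketch: the ordering of 'secrets' (not of the target ids) decides the winner,
# since match_secrets() preserves the order of 'secrets':
#
#   secrets = [('prod', s1), ('dev', s2)]        # s1/s2: hypothetical VaultSecrets
#   match_best_secret(secrets, ['dev', 'prod'])  # -> ('prod', s1)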
def match_encrypt_vault_id_secret(secrets, encrypt_vault_id=None):
# See if the --encrypt-vault-id matches a vault-id
display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
if encrypt_vault_id is None:
raise AnsibleError('match_encrypt_vault_id_secret requires a non None encrypt_vault_id')
encrypt_vault_id_matchers = [encrypt_vault_id]
encrypt_secret = match_best_secret(secrets, encrypt_vault_id_matchers)
# return the best match for --encrypt-vault-id
if encrypt_secret:
return encrypt_secret
    # If we specified an encrypt_vault_id and we couldn't find it, don't
    # fall back to using the first/best secret
raise AnsibleVaultError('Did not find a match for --encrypt-vault-id=%s in the known vault-ids %s' % (encrypt_vault_id,
[_v for _v, _vs in secrets]))
def match_encrypt_secret(secrets, encrypt_vault_id=None):
"""Find the best/first/only secret in secrets to use for encrypting"""
display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
# See if the --encrypt-vault-id matches a vault-id
if encrypt_vault_id:
return match_encrypt_vault_id_secret(secrets,
encrypt_vault_id=encrypt_vault_id)
    # Find the best/first secret from secrets since we didn't specify otherwise
# ie, consider all of the available secrets as matches
_vault_id_matchers = [_vault_id for _vault_id, dummy in secrets]
best_secret = match_best_secret(secrets, _vault_id_matchers)
    # may be None if no secret matched
return best_secret
class VaultLib:
def __init__(self, secrets=None):
self.secrets = secrets or []
self.cipher_name = None
self.b_version = b'1.2'
@staticmethod
def is_encrypted(vaulttext):
return is_encrypted(vaulttext)
def encrypt(self, plaintext, secret=None, vault_id=None, salt=None):
"""Vault encrypt a piece of data.
:arg plaintext: a text or byte string to encrypt.
:returns: a utf-8 encoded byte str of encrypted data. The string
contains a header identifying this as vault encrypted data and
formatted to newline terminated lines of 80 characters. This is
suitable for dumping as is to a vault file.
If the string passed in is a text string, it will be encoded to UTF-8
before encryption.
"""
if secret is None:
if self.secrets:
dummy, secret = match_encrypt_secret(self.secrets)
else:
raise AnsibleVaultError("A vault password must be specified to encrypt data")
b_plaintext = to_bytes(plaintext, errors='surrogate_or_strict')
if is_encrypted(b_plaintext):
raise AnsibleError("input is already encrypted")
if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_ALLOWLIST:
self.cipher_name = u"AES256"
try:
this_cipher = CIPHER_MAPPING[self.cipher_name]()
except KeyError:
raise AnsibleError(u"{0} cipher could not be found".format(self.cipher_name))
# encrypt data
if vault_id:
display.vvvvv(u'Encrypting with vault_id "%s" and vault secret %s' % (to_text(vault_id), to_text(secret)))
else:
display.vvvvv(u'Encrypting without a vault_id using vault secret %s' % to_text(secret))
b_ciphertext = this_cipher.encrypt(b_plaintext, secret, salt)
# format the data for output to the file
b_vaulttext = format_vaulttext_envelope(b_ciphertext,
self.cipher_name,
vault_id=vault_id)
return b_vaulttext
def decrypt(self, vaulttext, filename=None, obj=None):
"""Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
:kwarg filename: a filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted.
        :returns: a byte string containing the decrypted data
"""
plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename, obj=obj)
return plaintext
def decrypt_and_get_vault_id(self, vaulttext, filename=None, obj=None):
"""Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
:kwarg filename: a filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted.
        :returns: a tuple of the byte string containing the decrypted data, and the vault-id and vault-secret that were used
"""
b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8')
if self.secrets is None:
msg = "A vault password must be specified to decrypt data"
if filename:
msg += " in file %s" % to_native(filename)
raise AnsibleVaultError(msg)
if not is_encrypted(b_vaulttext):
msg = "input is not vault encrypted data. "
if filename:
msg += "%s is not a vault encrypted file" % to_native(filename)
raise AnsibleError(msg)
b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename)
# create the cipher object, note that the cipher used for decrypt can
# be different than the cipher used for encrypt
if cipher_name in CIPHER_ALLOWLIST:
this_cipher = CIPHER_MAPPING[cipher_name]()
else:
raise AnsibleError("{0} cipher could not be found".format(cipher_name))
b_plaintext = None
if not self.secrets:
raise AnsibleVaultError('Attempting to decrypt but no vault secrets found')
# WARNING: Currently, the vault id is not required to match the vault id in the vault blob to
# decrypt a vault properly. The vault id in the vault blob is not part of the encrypted
# or signed vault payload. There is no cryptographic checking/verification/validation of the
        # vault blob's vault id. It can be tampered with and changed. The vault id is just a nickname
        # used to pick the best secret and provide some ux/ui info.
# iterate over all the applicable secrets (all of them by default) until one works...
# if we specify a vault_id, only the corresponding vault secret is checked and
# we check it first.
vault_id_matchers = []
vault_id_used = None
vault_secret_used = None
if vault_id:
display.vvvvv(u'Found a vault_id (%s) in the vaulttext' % to_text(vault_id))
vault_id_matchers.append(vault_id)
_matches = match_secrets(self.secrets, vault_id_matchers)
if _matches:
display.vvvvv(u'We have a secret associated with vault id (%s), will try to use to decrypt %s' % (to_text(vault_id), to_text(filename)))
else:
                display.vvvvv(u'Found a vault_id (%s) in the vault text, but we do not have an associated secret (--vault-id)' % to_text(vault_id))
# Not adding the other secrets to vault_secret_ids enforces a match between the vault_id from the vault_text and
# the known vault secrets.
if not C.DEFAULT_VAULT_ID_MATCH:
# Add all of the known vault_ids as candidates for decrypting a vault.
vault_id_matchers.extend([_vault_id for _vault_id, _dummy in self.secrets if _vault_id != vault_id])
matched_secrets = match_secrets(self.secrets, vault_id_matchers)
# for vault_secret_id in vault_secret_ids:
for vault_secret_id, vault_secret in matched_secrets:
display.vvvvv(u'Trying to use vault secret=(%s) id=%s to decrypt %s' % (to_text(vault_secret), to_text(vault_secret_id), to_text(filename)))
try:
# secret = self.secrets[vault_secret_id]
display.vvvv(u'Trying secret %s for vault_id=%s' % (to_text(vault_secret), to_text(vault_secret_id)))
b_plaintext = this_cipher.decrypt(b_vaulttext, vault_secret)
if b_plaintext is not None:
vault_id_used = vault_secret_id
vault_secret_used = vault_secret
file_slug = ''
if filename:
file_slug = ' of "%s"' % filename
display.vvvvv(
u'Decrypt%s successful with secret=%s and vault_id=%s' % (to_text(file_slug), to_text(vault_secret), to_text(vault_secret_id))
)
break
except AnsibleVaultFormatError as exc:
exc.obj = obj
msg = u"There was a vault format error"
if filename:
msg += u' in %s' % (to_text(filename))
msg += u': %s' % to_text(exc)
display.warning(msg, formatted=True)
raise
except AnsibleError as e:
display.vvvv(u'Tried to use the vault secret (%s) to decrypt (%s) but it failed. Error: %s' %
(to_text(vault_secret_id), to_text(filename), e))
continue
else:
msg = "Decryption failed (no vault secrets were found that could decrypt)"
if filename:
msg += " on %s" % to_native(filename)
raise AnsibleVaultError(msg)
if b_plaintext is None:
msg = "Decryption failed"
if filename:
msg += " on %s" % to_native(filename)
raise AnsibleError(msg)
return b_plaintext, vault_id_used, vault_secret_used
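# Round-trip sketch for VaultLib (password value hypothetical; requires the
# cryptography library):
#
#   secret = VaultSecret(_bytes=b'password')
#   vl = VaultLib(secrets=[('default', secret)])
#   b_env = vl.encrypt('s3cret data')           # $ANSIBLE_VAULT;1.1;AES256 envelope
#   assert vl.decrypt(b_env) == b's3cret data'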
class VaultEditor:
def __init__(self, vault=None):
# TODO: it may be more useful to just make VaultSecrets and index of VaultLib objects...
self.vault = vault or VaultLib()
    # TODO: mv shred file stuff to its own class
def _shred_file_custom(self, tmp_path):
""""Destroy a file, when shred (core-utils) is not available
Unix `shred' destroys files "so that they can be recovered only with great difficulty with
specialised hardware, if at all". It is based on the method from the paper
"Secure Deletion of Data from Magnetic and Solid-State Memory",
Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).
We do not go to that length to re-implement shred in Python; instead, overwriting with a block
of random data should suffice.
See https://github.com/ansible/ansible/pull/13700 .
"""
file_len = os.path.getsize(tmp_path)
if file_len > 0: # avoid work when file was empty
max_chunk_len = min(1024 * 1024 * 2, file_len)
passes = 3
with open(tmp_path, "wb") as fh:
for dummy in range(passes):
fh.seek(0, 0)
# get a random chunk of data, each pass with other length
chunk_len = random.randint(max_chunk_len // 2, max_chunk_len)
data = os.urandom(chunk_len)
for dummy in range(0, file_len // chunk_len):
fh.write(data)
fh.write(data[:file_len % chunk_len])
# FIXME remove this assert once we have unittests to check its accuracy
if fh.tell() != file_len:
raise AnsibleAssertionError()
os.fsync(fh)
def _shred_file(self, tmp_path):
"""Securely destroy a decrypted file
Note standard limitations of GNU shred apply (For flash, overwriting would have no effect
due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
        guarantee data hits the disk; etc). Furthermore, if your tmp dir is on tmpfs (a ramdisk),
it is a non-issue.
Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on
a custom shredding method.
"""
if not os.path.isfile(tmp_path):
# file is already gone
return
try:
r = subprocess.call(['shred', tmp_path])
except (OSError, ValueError):
# shred is not available on this system, or some other error occurred.
# ValueError caught because macOS El Capitan is raising an
# exception big enough to hit a limit in python2-2.7.11 and below.
# Symptom is ValueError: insecure pickle when shred is not
# installed there.
r = 1
if r != 0:
# we could not successfully execute unix shred; therefore, do custom shred.
self._shred_file_custom(tmp_path)
os.remove(tmp_path)
def _edit_file_helper(self, filename, secret, existing_data=None, force_save=False, vault_id=None):
# Create a tempfile
root, ext = os.path.splitext(os.path.realpath(filename))
fd, tmp_path = tempfile.mkstemp(suffix=ext, dir=C.DEFAULT_LOCAL_TMP)
cmd = self._editor_shell_command(tmp_path)
try:
if existing_data:
self.write_data(existing_data, fd, shred=False)
except Exception:
# if an error happens, destroy the decrypted file
self._shred_file(tmp_path)
raise
finally:
os.close(fd)
try:
# drop the user into an editor on the tmp file
subprocess.call(cmd)
except Exception as e:
# if an error happens, destroy the decrypted file
self._shred_file(tmp_path)
raise AnsibleError('Unable to execute the command "%s": %s' % (' '.join(cmd), to_native(e)))
b_tmpdata = self.read_data(tmp_path)
# Do nothing if the content has not changed
if force_save or existing_data != b_tmpdata:
# encrypt new data and write out to tmp
# An existing vaultfile will always be UTF-8,
# so decode to unicode here
b_ciphertext = self.vault.encrypt(b_tmpdata, secret, vault_id=vault_id)
self.write_data(b_ciphertext, tmp_path)
# shuffle tmp file into place
self.shuffle_files(tmp_path, filename)
display.vvvvv(u'Saved edited file "%s" encrypted using %s and vault id "%s"' % (to_text(filename), to_text(secret), to_text(vault_id)))
# always shred temp, jic
self._shred_file(tmp_path)
def _real_path(self, filename):
        # '-' is special to VaultEditor, don't expand it.
if filename == '-':
return filename
real_path = os.path.realpath(filename)
return real_path
def encrypt_bytes(self, b_plaintext, secret, vault_id=None):
b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
return b_ciphertext
def encrypt_file(self, filename, secret, vault_id=None, output_file=None):
# A file to be encrypted into a vaultfile could be any encoding
# so treat the contents as a byte string.
# follow the symlink
filename = self._real_path(filename)
b_plaintext = self.read_data(filename)
b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
self.write_data(b_ciphertext, output_file or filename)
def decrypt_file(self, filename, output_file=None):
# follow the symlink
filename = self._real_path(filename)
ciphertext = self.read_data(filename)
try:
plaintext = self.vault.decrypt(ciphertext, filename=filename)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
self.write_data(plaintext, output_file or filename, shred=False)
def create_file(self, filename, secret, vault_id=None):
""" create a new encrypted file """
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
display.warning(u"%s does not exist, creating..." % to_text(dirname))
makedirs_safe(dirname)
# FIXME: If we can raise an error here, we can probably just make it
# behave like edit instead.
if os.path.isfile(filename):
raise AnsibleError("%s exists, please use 'edit' instead" % filename)
self._edit_file_helper(filename, secret, vault_id=vault_id)
def edit_file(self, filename):
vault_id_used = None
vault_secret_used = None
# follow the symlink
filename = self._real_path(filename)
b_vaulttext = self.read_data(filename)
# vault or yaml files are always utf8
vaulttext = to_text(b_vaulttext)
try:
# vaulttext gets converted back to bytes, but alas
# TODO: return the vault_id that worked?
plaintext, vault_id_used, vault_secret_used = self.vault.decrypt_and_get_vault_id(vaulttext)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
# Figure out the vault id from the file, to select the right secret to re-encrypt it
# (duplicates parts of decrypt, but alas...)
dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename)
# vault id here may not be the vault id actually used for decrypting
# as when the edited file has no vault-id but is decrypted by non-default id in secrets
# (vault_id=default, while a different vault-id decrypted)
# we want to get rid of files encrypted with the AES cipher
force_save = (cipher_name not in CIPHER_WRITE_ALLOWLIST)
# Keep the same vault-id (and version) as in the header
self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext, force_save=force_save, vault_id=vault_id)
def plaintext(self, filename):
b_vaulttext = self.read_data(filename)
vaulttext = to_text(b_vaulttext)
try:
plaintext = self.vault.decrypt(vaulttext, filename=filename)
return plaintext
except AnsibleError as e:
raise AnsibleVaultError("%s for %s" % (to_native(e), to_native(filename)))
# FIXME/TODO: make this use VaultSecret
def rekey_file(self, filename, new_vault_secret, new_vault_id=None):
# follow the symlink
filename = self._real_path(filename)
prev = os.stat(filename)
b_vaulttext = self.read_data(filename)
vaulttext = to_text(b_vaulttext)
        display.vvvvv(u'Rekeying file "%s" with new vault-id "%s" and vault secret %s' %
(to_text(filename), to_text(new_vault_id), to_text(new_vault_secret)))
try:
plaintext, vault_id_used, _dummy = self.vault.decrypt_and_get_vault_id(vaulttext)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
# This is more or less an assert, see #18247
if new_vault_secret is None:
raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename)
# FIXME: VaultContext...? could rekey to a different vault_id in the same VaultSecrets
# Need a new VaultLib because the new vault data can be a different
# vault lib format or cipher (for ex, when we migrate 1.0 style vault data to
# 1.1 style data we change the version and the cipher). This is where a VaultContext might help
# the new vault will only be used for encrypting, so it doesn't need the vault secrets
# (we will pass one in directly to encrypt)
new_vault = VaultLib(secrets={})
b_new_vaulttext = new_vault.encrypt(plaintext, new_vault_secret, vault_id=new_vault_id)
self.write_data(b_new_vaulttext, filename)
# preserve permissions
os.chmod(filename, prev.st_mode)
os.chown(filename, prev.st_uid, prev.st_gid)
display.vvvvv(u'Rekeyed file "%s" (decrypted with vault id "%s") was encrypted with new vault-id "%s" and vault secret %s' %
(to_text(filename), to_text(vault_id_used), to_text(new_vault_id), to_text(new_vault_secret)))
def read_data(self, filename):
try:
if filename == '-':
data = sys.stdin.buffer.read()
else:
with open(filename, "rb") as fh:
data = fh.read()
except Exception as e:
msg = to_native(e)
if not msg:
msg = repr(e)
raise AnsibleError('Unable to read source file (%s): %s' % (to_native(filename), msg))
return data
def write_data(self, data, thefile, shred=True, mode=0o600):
# TODO: add docstrings for arg types since this code is picky about that
"""Write the data bytes to given path
This is used to write a byte string to a file or stdout. It is used for
writing the results of vault encryption or decryption. It is used for
saving the ciphertext after encryption and it is also used for saving the
plaintext after decrypting a vault. The type of the 'data' arg should be bytes,
since in the plaintext case, the original contents can be of any text encoding
or arbitrary binary data.
When used to write the result of vault encryption, the value of the 'data' arg
should be a utf-8 encoded byte string and not a text type.
When used to write the result of vault decryption, the value of the 'data' arg
should be a byte string and not a text type.
:arg data: the byte string (bytes) data
:arg thefile: file descriptor or filename to save 'data' to.
        :arg shred: if shred==True, make sure that the original data is first shredded so that it cannot be recovered.
:returns: None
"""
# FIXME: do we need this now? data_bytes should always be a utf-8 byte string
b_file_data = to_bytes(data, errors='strict')
# check if we have a file descriptor instead of a path
is_fd = False
try:
is_fd = (isinstance(thefile, int) and fcntl.fcntl(thefile, fcntl.F_GETFD) != -1)
except Exception:
pass
if is_fd:
# if passed descriptor, use that to ensure secure access, otherwise it is a string.
# assumes the fd is securely opened by caller (mkstemp)
os.ftruncate(thefile, 0)
os.write(thefile, b_file_data)
elif thefile == '-':
# get a ref to either sys.stdout.buffer for py3 or plain old sys.stdout for py2
# We need sys.stdout.buffer on py3 so we can write bytes to it since the plaintext
# of the vaulted object could be anything/binary/etc
output = getattr(sys.stdout, 'buffer', sys.stdout)
output.write(b_file_data)
else:
if not os.access(os.path.dirname(thefile), os.W_OK):
raise AnsibleError("Destination '%s' not writable" % (os.path.dirname(thefile)))
# file names are insecure and prone to race conditions, so remove and create securely
if os.path.isfile(thefile):
if shred:
self._shred_file(thefile)
else:
os.remove(thefile)
# when setting new umask, we get previous as return
current_umask = os.umask(0o077)
try:
try:
# create file with secure permissions
fd = os.open(thefile, os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC, mode)
except OSError as ose:
# Want to catch FileExistsError, which doesn't exist in Python 2, so catch OSError
# and compare the error number to get equivalent behavior in Python 2/3
if ose.errno == errno.EEXIST:
raise AnsibleError('Vault file got recreated while we were operating on it: %s' % to_native(ose))
raise AnsibleError('Problem creating temporary vault file: %s' % to_native(ose))
try:
# now write to the file and ensure ours is only data in it
os.ftruncate(fd, 0)
os.write(fd, b_file_data)
except OSError as e:
raise AnsibleError('Unable to write to temporary vault file: %s' % to_native(e))
finally:
# Make sure the file descriptor is always closed and reset umask
os.close(fd)
finally:
os.umask(current_umask)
def shuffle_files(self, src, dest):
prev = None
# overwrite dest with src
if os.path.isfile(dest):
prev = os.stat(dest)
# old file 'dest' was encrypted, no need to _shred_file
os.remove(dest)
shutil.move(src, dest)
# reset permissions if needed
if prev is not None:
# TODO: selinux, ACLs, xattr?
os.chmod(dest, prev.st_mode)
os.chown(dest, prev.st_uid, prev.st_gid)
def _editor_shell_command(self, filename):
env_editor = C.config.get_config_value('EDITOR')
editor = shlex.split(env_editor)
editor.append(filename)
return editor
########################################
# CIPHERS #
########################################
class VaultAES256:
"""
Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
Keys are derived using PBKDF2
"""
# http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
# Note: strings in this class should be byte strings by default.
def __init__(self):
if not HAS_CRYPTOGRAPHY:
raise AnsibleError(NEED_CRYPTO_LIBRARY)
@staticmethod
def _create_key_cryptography(b_password, b_salt, key_length, iv_length):
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=2 * key_length + iv_length,
salt=b_salt,
iterations=10000,
backend=CRYPTOGRAPHY_BACKEND)
b_derivedkey = kdf.derive(b_password)
return b_derivedkey
@classmethod
def _gen_key_initctr(cls, b_password, b_salt):
# 16 for AES 128, 32 for AES256
key_length = 32
if HAS_CRYPTOGRAPHY:
# AES is a 128-bit block cipher, so IVs and counter nonces are 16 bytes
iv_length = algorithms.AES.block_size // 8
b_derivedkey = cls._create_key_cryptography(b_password, b_salt, key_length, iv_length)
b_iv = b_derivedkey[(key_length * 2):(key_length * 2) + iv_length]
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in initctr)')
b_key1 = b_derivedkey[:key_length]
b_key2 = b_derivedkey[key_length:(key_length * 2)]
return b_key1, b_key2, b_iv
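    # Layout of the 80-byte PBKDF2 output for AES256 (key_length=32, iv_length=16):
    #
    #   [ b_key1: 32 bytes | b_key2: 32 bytes | b_iv: 16 bytes ]
    #
    # b_key1 keys the AES-CTR cipher, b_key2 keys the HMAC-SHA256, and b_iv is
    # the CTR nonce.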
@staticmethod
def _encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv):
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
encryptor = cipher.encryptor()
padder = padding.PKCS7(algorithms.AES.block_size).padder()
b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize())
b_ciphertext += encryptor.finalize()
# COMBINE SALT, DIGEST AND DATA
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
b_hmac = hmac.finalize()
return to_bytes(hexlify(b_hmac), errors='surrogate_or_strict'), hexlify(b_ciphertext)
@classmethod
def _get_salt(cls):
custom_salt = C.config.get_config_value('VAULT_ENCRYPT_SALT')
if not custom_salt:
custom_salt = os.urandom(32)
return to_bytes(custom_salt)
@classmethod
def encrypt(cls, b_plaintext, secret, salt=None):
if secret is None:
raise AnsibleVaultError('The secret passed to encrypt() was None')
if salt is None:
b_salt = cls._get_salt()
elif not salt:
raise AnsibleVaultError('Empty or invalid salt passed to encrypt()')
else:
b_salt = to_bytes(salt)
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_hmac, b_ciphertext = cls._encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in encrypt)')
b_vaulttext = b'\n'.join([hexlify(b_salt), b_hmac, b_ciphertext])
# Unnecessary but getting rid of it is a backwards incompatible vault
# format change
b_vaulttext = hexlify(b_vaulttext)
return b_vaulttext
@classmethod
def _decrypt_cryptography(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
# b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt)
# EXIT EARLY IF DIGEST DOESN'T MATCH
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
try:
hmac.verify(_unhexlify(b_crypted_hmac))
except InvalidSignature as e:
raise AnsibleVaultError('HMAC verification failed: %s' % e)
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
decryptor = cipher.decryptor()
unpadder = padding.PKCS7(128).unpadder()
b_plaintext = unpadder.update(
decryptor.update(b_ciphertext) + decryptor.finalize()
) + unpadder.finalize()
return b_plaintext
@staticmethod
def _is_equal(b_a, b_b):
"""
Comparing 2 byte arrays in constant time to avoid timing attacks.
It would be nice if there were a library for this but hey.
"""
if not (isinstance(b_a, binary_type) and isinstance(b_b, binary_type)):
raise TypeError('_is_equal can only be used to compare two byte strings')
# http://codahale.com/a-lesson-in-timing-attacks/
if len(b_a) != len(b_b):
return False
result = 0
for b_x, b_y in zip(b_a, b_b):
result |= b_x ^ b_y
return result == 0
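    # Note: the standard library provides an equivalent constant-time check,
    # so this method could be reduced to (sketch):
    #
    #   import hmac
    #   return hmac.compare_digest(b_a, b_b)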
@classmethod
def decrypt(cls, b_vaulttext, secret):
b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext)
# TODO: would be nice if a VaultSecret could be passed directly to _decrypt_*
# (move _gen_key_initctr() to a AES256 VaultSecret or VaultContext impl?)
# though, likely needs to be python cryptography specific impl that basically
# creates a Cipher() with b_key1, a Mode.CTR() with b_iv, and a HMAC() with sign key b_key2
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_plaintext = cls._decrypt_cryptography(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in decrypt)')
return b_plaintext
# Keys could be made bytes later if the code that gets the data is more
# naturally byte-oriented
CIPHER_MAPPING = {
u'AES256': VaultAES256,
}
| 51,719 | Python | .py | 997 | 42.047141 | 152 | 0.639056 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,036 | addresses.py | ansible_ansible/lib/ansible/parsing/utils/addresses.py |
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import re
from ansible.errors import AnsibleParserError, AnsibleError
# Components that match a numeric or alphanumeric begin:end or begin:end:step
# range expression inside square brackets.
numeric_range = r"""
\[
(?:[0-9]+:[0-9]+) # numeric begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
"""
hexadecimal_range = r"""
\[
(?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
"""
alphanumeric_range = r"""
\[
(?:
[a-z]:[a-z]| # one-char alphabetic range
[0-9]+:[0-9]+ # ...or a numeric one
)
(?::[0-9]+)? # numeric :step (optional)
\]
"""
# Components that match a 16-bit portion of an IPv6 address in hexadecimal
# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
# (0..255) or an [x:y(:z)] numeric range.
ipv6_component = r"""
(?:
[0-9a-f]{{1,4}}| # 0..ffff
{range} # or a numeric range
)
""".format(range=hexadecimal_range)
ipv4_component = r"""
(?:
[01]?[0-9]{{1,2}}| # 0..199
2[0-4][0-9]| # 200..249
25[0-5]| # 250..255
{range} # or a numeric range
)
""".format(range=numeric_range)
# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
# characters plus dashes (and underscores) or valid ranges. The label may not
# start or end with a hyphen or an underscore. This is interpolated into the
# hostname pattern below. We don't try to enforce the 63-char length limit.
label = r"""
(?:[\w]|{range}) # Starts with an alphanumeric or a range
(?:[\w_-]|{range})* # Then zero or more of the same or [_-]
(?<![_-]) # ...as long as it didn't end with [_-]
""".format(range=alphanumeric_range)
patterns = {
# This matches a square-bracketed expression with a port specification. What
# is inside the square brackets is validated later.
'bracketed_hostport': re.compile(
r"""^
\[(.+)\] # [host identifier]
:([0-9]+) # :port number
$
""", re.X
),
# This matches a bare IPv4 address or hostname (or host pattern including
# [x:y(:z)] ranges) with a port specification.
'hostport': re.compile(
r"""^
((?: # We want to match:
[^:\[\]] # (a non-range character
| # ...or...
\[[^\]]*\] # a complete bracketed expression)
)*) # repeated as many times as possible
:([0-9]+) # followed by a port number
$
""", re.X
),
# This matches an IPv4 address, but also permits range expressions.
'ipv4': re.compile(
r"""^
            (?:{i4}\.){{3}}{i4} # Three dot-terminated parts plus one more
$
""".format(i4=ipv4_component), re.X | re.I
),
# This matches an IPv6 address, but also permits range expressions.
#
# This expression looks complex, but it really only spells out the various
# combinations in which the basic unit of an IPv6 address (0..ffff) can be
# written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
# as ::ffff:192.0.2.3.
#
# Note that we can't just use ipaddress.ip_address() because we also have to
# accept ranges in place of each component.
'ipv6': re.compile(
r"""^
(?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
(?:{0}:){{1,6}}:| # compressed variants, which are all
(?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
(?:{0}:){{2}}(?::{0}){{1,5}}|
(?:{0}:){{3}}(?::{0}){{1,4}}|
(?:{0}:){{4}}(?::{0}){{1,3}}|
(?:{0}:){{5}}(?::{0}){{1,2}}|
(?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
:(?::{0}){{1,6}}| # ::ffff(:ffff...)
{0}?::| # ffff::, ::
# ipv4-in-ipv6 variants
(?:0:){{6}}(?:{0}\.){{3}}{0}|
::(?:ffff:)?(?:{0}\.){{3}}{0}|
(?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
$
""".format(ipv6_component), re.X | re.I
),
# This matches a hostname or host pattern including [x:y(:z)] ranges.
#
# We roughly follow DNS rules here, but also allow ranges (and underscores).
# In the past, no systematic rules were enforced about inventory hostnames,
# but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
# various metacharacters anyway.
#
# We don't enforce DNS length restrictions here (63 characters per label,
# 253 characters total) or make any attempt to process IDNs.
'hostname': re.compile(
r"""^
{label} # We must have at least one label
(?:\.{label})* # Followed by zero or more .labels
$
""".format(label=label), re.X | re.I | re.UNICODE
),
}
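# A few illustrative matches against these patterns:
#
#   patterns['ipv4'].match('192.0.2.1')            # matches
#   patterns['ipv4'].match('192.0.2.[1:3]')        # matches (range form)
#   patterns['hostname'].match('db[a:c].example')  # matches (range label)
#   patterns['bracketed_hostport'].match('[2001:db8::1]:22').groups()
#   # -> ('2001:db8::1', '22')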
def parse_address(address, allow_ranges=False):
"""
Takes a string and returns a (host, port) tuple. If the host is None, then
the string could not be parsed as a host identifier with an optional port
specification. If the port is None, then no port was specified.
The host identifier may be a hostname (qualified or not), an IPv4 address,
or an IPv6 address. If allow_ranges is True, then any of those may contain
    [x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x:z].
The port number is an optional :NN suffix on an IPv4 address or host name,
or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
IPv4 address, or host name. (This means the only way to specify a port for
an IPv6 address is to enclose it in square brackets.)
"""
# First, we extract the port number if one is specified.
port = None
for matching in ['bracketed_hostport', 'hostport']:
m = patterns[matching].match(address)
if m:
(address, port) = m.groups()
port = int(port)
continue
# What we're left with now must be an IPv4 or IPv6 address, possibly with
# numeric ranges, or a hostname with alphanumeric ranges.
host = None
for matching in ['ipv4', 'ipv6', 'hostname']:
m = patterns[matching].match(address)
if m:
host = address
continue
# If it isn't any of the above, we don't understand it.
if not host:
raise AnsibleError("Not a valid network hostname: %s" % address)
# If we get to this point, we know that any included ranges are valid.
# If the caller is prepared to handle them, all is well.
# Otherwise we treat it as a parse failure.
if not allow_ranges and '[' in host:
raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
return (host, port)
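# Usage sketch:
#
#   parse_address('192.0.2.1:22')        # -> ('192.0.2.1', 22)
#   parse_address('[2001:db8::1]:22')    # -> ('2001:db8::1', 22)
#   parse_address('web[1:3].example.com', allow_ranges=True)
#   # -> ('web[1:3].example.com', None)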
| 8,083 | Python | .py | 181 | 37.928177 | 89 | 0.555471 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,037 | jsonify.py | ansible_ansible/lib/ansible/parsing/utils/jsonify.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import json
def jsonify(result, format=False):
""" format JSON output (uncompressed or uncompressed) """
if result is None:
return "{}"
indent = None
if format:
indent = 4
try:
return json.dumps(result, sort_keys=True, indent=indent, ensure_ascii=False)
except UnicodeDecodeError:
return json.dumps(result, sort_keys=True, indent=indent)
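# Usage sketch:
#
#   jsonify({'b': 1, 'a': 2})        # -> '{"a": 2, "b": 1}' (keys sorted)
#   jsonify({'a': 1}, format=True)   # pretty-printed with a 4-space indent
#   jsonify(None)                    # -> '{}'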
| 1,149 | Python | .py | 29 | 36.448276 | 84 | 0.740341 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,038 | __init__.py | ansible_ansible/lib/ansible/parsing/utils/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
| 749 | Python | .py | 17 | 43 | 70 | 0.77565 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,039 | yaml.py | ansible_ansible/lib/ansible/parsing/utils/yaml.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import json
from yaml import YAMLError
from ansible.errors import AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.module_utils.common.text.converters import to_native
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.parsing.ajson import AnsibleJSONDecoder
__all__ = ('from_yaml',)
def _handle_error(json_exc, yaml_exc, file_name, show_content):
"""
Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
file name/position where a YAML exception occurred, and raises an AnsibleParserError
to display the syntax exception information.
"""
# if the YAML exception contains a problem mark, use it to construct
# an object the error class can use to display the faulty line
err_obj = None
if hasattr(yaml_exc, 'problem_mark'):
err_obj = AnsibleBaseYAMLObject()
err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
n_yaml_syntax_error = YAML_SYNTAX_ERROR % to_native(getattr(yaml_exc, 'problem', u''))
    n_err_msg = 'We were unable to read the data as either JSON or YAML, these are the errors we got from each:\n' \
'JSON: %s\n\n%s' % (to_native(json_exc), n_yaml_syntax_error)
raise AnsibleParserError(n_err_msg, obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
def _safe_load(stream, file_name=None, vault_secrets=None):
""" Implements yaml.safe_load(), except using our custom loader class. """
loader = AnsibleLoader(stream, file_name, vault_secrets)
try:
return loader.get_single_data()
finally:
try:
loader.dispose()
except AttributeError:
pass # older versions of yaml don't have dispose function, ignore
def from_yaml(data, file_name='<string>', show_content=True, vault_secrets=None, json_only=False):
"""
Creates a python datastructure from the given data, which can be either
a JSON or YAML string.
"""
new_data = None
try:
# in case we have to deal with vaults
AnsibleJSONDecoder.set_secrets(vault_secrets)
# we first try to load this data as JSON.
# Fixes issues with extra vars json strings not being parsed correctly by the yaml parser
new_data = json.loads(data, cls=AnsibleJSONDecoder)
except Exception as json_exc:
if json_only:
raise AnsibleParserError(to_native(json_exc), orig_exc=json_exc)
# must not be JSON, let the rest try
try:
new_data = _safe_load(data, file_name=file_name, vault_secrets=vault_secrets)
except YAMLError as yaml_exc:
_handle_error(json_exc, yaml_exc, file_name, show_content)
return new_data
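# --- illustrative usage sketch (not part of the original file) ---
# A minimal demonstration of the JSON-first parse order described above;
# runnable only where Ansible itself is importable. Inputs are made up.
if __name__ == '__main__':
    print(from_yaml('{"a": 1}'))           # valid JSON: parsed by AnsibleJSONDecoder
    print(from_yaml('a: 1\nb: [2, 3]'))    # JSON parse fails, YAML fallback succeeds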
| 3,094
|
Python
|
.py
| 61
| 44.721311
| 108
| 0.710823
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,040
|
host.py
|
ansible_ansible/lib/ansible/inventory/host.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from collections.abc import Mapping, MutableMapping
from ansible.inventory.group import Group, InventoryObjectType
from ansible.parsing.utils.addresses import patterns
from ansible.utils.vars import combine_vars, get_unique_id
__all__ = ['Host']
class Host:
""" a single ansible host """
base_type = InventoryObjectType.HOST
# __slots__ = [ 'name', 'vars', 'groups' ]
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def __eq__(self, other):
if not isinstance(other, Host):
return False
return self._uuid == other._uuid
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.get_name()
def __repr__(self):
return self.get_name()
def serialize(self):
groups = []
for group in self.groups:
groups.append(group.serialize())
return dict(
name=self.name,
vars=self.vars.copy(),
address=self.address,
uuid=self._uuid,
groups=groups,
implicit=self.implicit,
)
def deserialize(self, data):
self.__init__(gen_uuid=False) # used by __setstate__ to deserialize in place # pylint: disable=unnecessary-dunder-call
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.address = data.get('address', '')
self._uuid = data.get('uuid', None)
self.implicit = data.get('implicit', False)
groups = data.get('groups', [])
for group_data in groups:
g = Group()
g.deserialize(group_data)
self.groups.append(g)
def __init__(self, name=None, port=None, gen_uuid=True):
self.vars = {}
self.groups = []
self._uuid = None
self.name = name
self.address = name
if port:
self.set_variable('ansible_port', int(port))
if gen_uuid:
self._uuid = get_unique_id()
self.implicit = False
def get_name(self):
return self.name
def populate_ancestors(self, additions=None):
# populate ancestors
if additions is None:
for group in self.groups:
self.add_group(group)
else:
for group in additions:
if group not in self.groups:
self.groups.append(group)
def add_group(self, group):
added = False
# populate ancestors first
for oldg in group.get_ancestors():
if oldg not in self.groups:
self.groups.append(oldg)
# actually add group
if group not in self.groups:
self.groups.append(group)
added = True
return added
def remove_group(self, group):
removed = False
if group in self.groups:
self.groups.remove(group)
removed = True
        # remove exclusive ancestors, except 'all'!
for oldg in group.get_ancestors():
if oldg.name != 'all':
for childg in self.groups:
if oldg in childg.get_ancestors():
break
else:
self.remove_group(oldg)
return removed
def set_variable(self, key, value):
if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
self.vars = combine_vars(self.vars, {key: value})
else:
self.vars[key] = value
def get_groups(self):
return self.groups
def get_magic_vars(self):
results = {}
results['inventory_hostname'] = self.name
if patterns['ipv4'].match(self.name) or patterns['ipv6'].match(self.name):
results['inventory_hostname_short'] = self.name
else:
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([g.name for g in self.get_groups() if g.name != 'all'])
return results
def get_vars(self):
return combine_vars(self.vars, self.get_magic_vars())
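# --- illustrative usage sketch (not part of the original file) ---
# A small, hedged demonstration of Host; the hostname is made up.
if __name__ == '__main__':
    h = Host('web1.example.com', port=2222)
    print(h.vars)                                          # {'ansible_port': 2222}
    print(h.get_magic_vars()['inventory_hostname_short'])  # 'web1' (first dotted label)
    print(h.get_vars())                                    # host vars merged with magic vars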
| 5,037
|
Python
|
.py
| 131
| 29.503817
| 128
| 0.602177
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,041
|
manager.py
|
ansible_ansible/lib/ansible/inventory/manager.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import annotations
import fnmatch
import os
import sys
import re
import itertools
import traceback
from operator import attrgetter
from random import shuffle
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.inventory.data import InventoryData
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.loader import inventory_loader
from ansible.utils.helpers import deduplicate_list
from ansible.utils.path import unfrackpath
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
from ansible.vars.plugins import get_vars_from_inventory_sources
display = Display()
IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS]
IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS]
IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS))
PATTERN_WITH_SUBSCRIPT = re.compile(
r"""^
(.+) # A pattern expression ending with...
\[(?: # A [subscript] expression comprising:
(-?[0-9]+)| # A single positive or negative number
([0-9]+)([:-]) # Or an x:y or x: range.
([0-9]*)
)\]
$
""", re.X
)
def order_patterns(patterns):
""" takes a list of patterns and reorders them by modifier to apply them consistently """
# FIXME: this goes away if we apply patterns incrementally or by groups
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if not p:
continue
if p[0] == "!":
pattern_exclude.append(p)
elif p[0] == "&":
pattern_intersection.append(p)
else:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
return pattern_regular + pattern_intersection + pattern_exclude
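# --- illustrative sketch (not part of the original file) ---
# Expected reordering, with made-up pattern names:
#   order_patterns(['!db', 'web*', '&staging']) -> ['web*', '&staging', '!db']
#   order_patterns(['!db'])                     -> ['all', '!db']   # implicit 'all'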
def split_host_pattern(pattern):
"""
Takes a string containing host patterns separated by commas (or a list
thereof) and returns a list of single patterns (which may not contain
commas). Whitespace is ignored.
Also accepts ':' as a separator for backwards compatibility, but it is
not recommended due to the conflict with IPv6 addresses and host ranges.
Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
"""
if isinstance(pattern, list):
results = (split_host_pattern(p) for p in pattern)
# flatten the results
return list(itertools.chain.from_iterable(results))
elif not isinstance(pattern, string_types):
pattern = to_text(pattern, errors='surrogate_or_strict')
# If it's got commas in it, we'll treat it as a straightforward
# comma-separated list of patterns.
if u',' in pattern:
patterns = pattern.split(u',')
# If it doesn't, it could still be a single pattern. This accounts for
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
else:
try:
(base, port) = parse_address(pattern, allow_ranges=True)
patterns = [pattern]
except Exception:
# The only other case we accept is a ':'-separated list of patterns.
# This mishandles IPv6 addresses, and is retained only for backwards
# compatibility.
patterns = re.findall(
to_text(r"""(?: # We want to match something comprising:
[^\s:\[\]] # (anything other than whitespace or ':[]'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)+ # occurring once or more
"""), pattern, re.X
)
return [p.strip() for p in patterns if p.strip()]
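# --- illustrative sketch (not part of the original file) ---
# Expected splits, with made-up pattern names:
#   split_host_pattern('a,b[1], c[2:3] , d') -> ['a', 'b[1]', 'c[2:3]', 'd']
#   split_host_pattern('web:db')             -> ['web', 'db']    # legacy ':' separator
#   split_host_pattern(['a,b', 'c'])         -> ['a', 'b', 'c']  # lists are flattened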
class InventoryManager(object):
""" Creates and manages inventory """
def __init__(self, loader, sources=None, parse=True, cache=True):
# base objects
self._loader = loader
self._inventory = InventoryData()
        # a list of host(names) to restrict current inquiries to
self._restriction = None
self._subset = None
# caches
self._hosts_patterns_cache = {} # resolved full patterns
self._pattern_cache = {} # resolved individual patterns
# the inventory dirs, files, script paths or lists of hosts
if sources is None:
self._sources = []
elif isinstance(sources, string_types):
self._sources = [sources]
else:
self._sources = sources
# get to work!
if parse:
self.parse_sources(cache=cache)
self._cached_dynamic_hosts = []
self._cached_dynamic_grouping = []
@property
def localhost(self):
return self._inventory.get_host('localhost')
@property
def groups(self):
return self._inventory.groups
@property
def hosts(self):
return self._inventory.hosts
def add_host(self, host, group=None, port=None):
return self._inventory.add_host(host, group, port)
def add_group(self, group):
return self._inventory.add_group(group)
def get_groups_dict(self):
return self._inventory.get_groups_dict()
def reconcile_inventory(self):
self.clear_caches()
return self._inventory.reconcile_inventory()
def get_host(self, hostname):
return self._inventory.get_host(hostname)
def _fetch_inventory_plugins(self):
""" sets up loaded inventory plugins for usage """
display.vvvv('setting up inventory plugins')
plugins = []
for name in C.INVENTORY_ENABLED:
plugin = inventory_loader.get(name)
if plugin:
plugins.append(plugin)
else:
display.warning('Failed to load inventory plugin, skipping %s' % name)
if not plugins:
raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one enabled.")
return plugins
def parse_sources(self, cache=False):
""" iterate over inventory sources and parse each one to populate it"""
parsed = False
# allow for multiple inventory parsing
for source in self._sources:
if source:
if ',' not in source:
source = unfrackpath(source, follow=False)
parse = self.parse_source(source, cache=cache)
if parse and not parsed:
parsed = True
if parsed:
# do post processing
self._inventory.reconcile_inventory()
else:
if C.INVENTORY_UNPARSED_IS_FAILED:
raise AnsibleError("No inventory was parsed, please check your configuration and options.")
elif C.INVENTORY_UNPARSED_WARNING:
display.warning("No inventory was parsed, only implicit localhost is available")
for group in self.groups.values():
group.vars = combine_vars(group.vars, get_vars_from_inventory_sources(self._loader, self._sources, [group], 'inventory'))
for host in self.hosts.values():
host.vars = combine_vars(host.vars, get_vars_from_inventory_sources(self._loader, self._sources, [host], 'inventory'))
def parse_source(self, source, cache=False):
""" Generate or update inventory for the source provided """
parsed = False
failures = []
display.debug(u'Examining possible inventory source: %s' % source)
# use binary for path functions
b_source = to_bytes(source)
# process directories as a collection of inventories
if os.path.isdir(b_source):
display.debug(u'Searching for inventory files in directory: %s' % source)
for i in sorted(os.listdir(b_source)):
display.debug(u'Considering %s' % i)
# Skip hidden files and stuff we explicitly ignore
if IGNORED.search(i):
continue
# recursively deal with directory entries
fullpath = to_text(os.path.join(b_source, i), errors='surrogate_or_strict')
parsed_this_one = self.parse_source(fullpath, cache=cache)
display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
if not parsed:
parsed = parsed_this_one
else:
# left with strings or files, let plugins figure it out
# set so new hosts can use for inventory_file/dir vars
self._inventory.current_source = source
# try source with each plugin
for plugin in self._fetch_inventory_plugins():
plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))
# initialize and figure out if plugin wants to attempt parsing this file
try:
plugin_wants = bool(plugin.verify_file(source))
except Exception:
plugin_wants = False
if plugin_wants:
try:
# FIXME in case plugin fails 1/2 way we have partial inventory
plugin.parse(self._inventory, self._loader, source, cache=cache)
try:
plugin.update_cache_if_changed()
except AttributeError:
# some plugins might not implement caching
pass
parsed = True
display.vvv('Parsed %s inventory source with %s plugin' % (source, plugin_name))
break
except AnsibleParserError as e:
display.debug('%s was not parsable by %s' % (source, plugin_name))
tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
failures.append({'src': source, 'plugin': plugin_name, 'exc': e, 'tb': tb})
except Exception as e:
display.debug('%s failed while attempting to parse %s' % (plugin_name, source))
tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
failures.append({'src': source, 'plugin': plugin_name, 'exc': AnsibleError(e), 'tb': tb})
else:
display.vvv("%s declined parsing %s as it did not pass its verify_file() method" % (plugin_name, source))
if parsed:
self._inventory.processed_sources.append(self._inventory.current_source)
else:
# only warn/error if NOT using the default or using it and the file is present
# TODO: handle 'non file' inventory and detect vs hardcode default
if source != '/etc/ansible/hosts' or os.path.exists(source):
if failures:
                    # only show errors if no plugin managed to parse the source.
for fail in failures:
display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc'])))
if 'tb' in fail:
display.vvv(to_text(fail['tb']))
# final error/warning on inventory source failure
if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
raise AnsibleError(u'Completely failed to parse inventory source %s' % (source))
else:
display.warning("Unable to parse %s as an inventory source" % source)
        # clean up, just in case
self._inventory.current_source = None
return parsed
def clear_caches(self):
""" clear all caches """
self._hosts_patterns_cache = {}
self._pattern_cache = {}
def refresh_inventory(self):
""" recalculate inventory """
self.clear_caches()
self._inventory = InventoryData()
self.parse_sources(cache=False)
for host in self._cached_dynamic_hosts:
self.add_dynamic_host(host, {'refresh': True})
for host, result in self._cached_dynamic_grouping:
result['refresh'] = True
self.add_dynamic_group(host, result)
def _match_list(self, items, pattern_str):
# compile patterns
try:
if not pattern_str[0] == '~':
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception:
raise AnsibleError('Invalid host list pattern: %s' % pattern_str)
# apply patterns
results = []
for item in items:
if pattern.match(item):
results.append(item)
return results
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None):
"""
Takes a pattern or list of patterns and returns a list of matching
        inventory hosts, taking into account any active restrictions
        or applied subsets.
"""
hosts = []
# Check if pattern already computed
if isinstance(pattern, list):
pattern_list = pattern[:]
else:
pattern_list = [pattern]
if pattern_list:
if not ignore_limits and self._subset:
pattern_list.extend(self._subset)
if not ignore_restrictions and self._restriction:
pattern_list.extend(self._restriction)
# This is only used as a hash key in the self._hosts_patterns_cache dict
# a tuple is faster than stringifying
pattern_hash = tuple(pattern_list)
if pattern_hash not in self._hosts_patterns_cache:
patterns = split_host_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits and self._subset:
# exclude hosts not in a subset, if defined
subset_uuids = set(s._uuid for s in self._evaluate_patterns(self._subset))
hosts = [h for h in hosts if h._uuid in subset_uuids]
if not ignore_restrictions and self._restriction:
# exclude hosts mentioned in any restriction (ex: failed hosts)
hosts = [h for h in hosts if h.name in self._restriction]
self._hosts_patterns_cache[pattern_hash] = deduplicate_list(hosts)
# sort hosts list if needed (should only happen when called from strategy)
if order in ['sorted', 'reverse_sorted']:
hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted'))
elif order == 'reverse_inventory':
hosts = self._hosts_patterns_cache[pattern_hash][::-1]
else:
hosts = self._hosts_patterns_cache[pattern_hash][:]
if order == 'shuffle':
shuffle(hosts)
elif order not in [None, 'inventory']:
raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order)
return hosts
def _evaluate_patterns(self, patterns):
"""
        Takes a list of patterns and returns a list of matching hosts,
taking into account any negative and intersection patterns.
"""
patterns = order_patterns(patterns)
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._inventory.hosts:
hosts.append(self._inventory.get_host(p))
else:
that = self._match_one_pattern(p)
if p[0] == "!":
that = set(that)
hosts = [h for h in hosts if h not in that]
elif p[0] == "&":
that = set(that)
hosts = [h for h in hosts if h in that]
else:
existing_hosts = set(y.name for y in hosts)
hosts.extend([h for h in that if h.name not in existing_hosts])
return hosts
def _match_one_pattern(self, pattern):
"""
        Takes a single pattern and returns a list of matching hosts.
Ignores intersection (&) and exclusion (!) specifiers.
The pattern may be:
1. A regex starting with ~, e.g. '~[abc]*'
2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
3. An ordinary word that matches itself only, e.g. 'foo'
The pattern is matched using the following rules:
1. If it's 'all', it matches all hosts in all groups.
2. Otherwise, for each known group name:
(a) if it matches the group name, the results include all hosts
in the group or any of its children.
(b) otherwise, if it matches any hosts in the group, the results
include the matching hosts.
This means that 'foo*' may match one or more groups (thus including all
hosts therein) but also hosts in other groups.
The built-in groups 'all' and 'ungrouped' are special. No pattern can
match these group names (though 'all' behaves as though it matches, as
described above). The word 'ungrouped' can match a host of that name,
and patterns like 'ungr*' and 'al*' can match either hosts or groups
other than all and ungrouped.
If the pattern matches one or more group names according to these rules,
it may have an optional range suffix to select a subset of the results.
This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
would work if 'foo*' matched the name of one or more groups.
Duplicate matches are always eliminated from the results.
"""
if pattern[0] in ("&", "!"):
pattern = pattern[1:]
if pattern not in self._pattern_cache:
(expr, slice) = self._split_subscript(pattern)
hosts = self._enumerate_matches(expr)
try:
hosts = self._apply_subscript(hosts, slice)
except IndexError:
raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
self._pattern_cache[pattern] = hosts
return self._pattern_cache[pattern]
def _split_subscript(self, pattern):
"""
Takes a pattern, checks if it has a subscript, and returns the pattern
without the subscript and a (start,end) tuple representing the given
subscript (or None if there is no subscript).
Validates that the subscript is in the right syntax, but doesn't make
sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern[0] == '~':
return (pattern, None)
# We want a pattern followed by an integer or range subscript.
# (We can't be more restrictive about the expression because the
# fnmatch semantics permit [\[:\]] to occur.)
subscript = None
m = PATTERN_WITH_SUBSCRIPT.match(pattern)
if m:
(pattern, idx, start, sep, end) = m.groups()
if idx:
subscript = (int(idx), None)
else:
if not end:
end = -1
subscript = (int(start), int(end))
if sep == '-':
display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
return (pattern, subscript)
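    # --- illustrative sketch (not part of the original file) ---
    # Expected results, with made-up pattern names:
    #   _split_subscript('foo*[1]')  -> ('foo*', (1, None))
    #   _split_subscript('foo[2:4]') -> ('foo', (2, 4))
    #   _split_subscript('foo[2:]')  -> ('foo', (2, -1))   # open-ended range
    #   _split_subscript('~web.*')   -> ('~web.*', None)   # regexes are never split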
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
if end == -1:
end = len(hosts) - 1
return hosts[start:end + 1]
else:
return [hosts[start]]
def _enumerate_matches(self, pattern):
"""
        Returns a list of hosts matching the given pattern according to the
rules explained above in _match_one_pattern.
"""
results = []
# check if pattern matches group
matching_groups = self._match_list(self._inventory.groups, pattern)
if matching_groups:
for groupname in matching_groups:
results.extend(self._inventory.groups[groupname].get_hosts())
# check hosts if no groups matched or it is a regex/glob pattern
if not matching_groups or pattern[0] == '~' or any(special in pattern for special in ('.', '?', '*', '[')):
# pattern might match host
matching_hosts = self._match_list(self._inventory.hosts, pattern)
if matching_hosts:
for hostname in matching_hosts:
results.append(self._inventory.hosts[hostname])
if not results and pattern in C.LOCALHOST:
# get_host autocreates implicit when needed
implicit = self._inventory.get_host(pattern)
if implicit:
results.append(implicit)
# Display warning if specified host pattern did not match any groups or hosts
if not results and not matching_groups and pattern != 'all':
msg = "Could not match supplied host pattern, ignoring: %s" % pattern
display.debug(msg)
if C.HOST_PATTERN_MISMATCH == 'warning':
display.warning(msg)
elif C.HOST_PATTERN_MISMATCH == 'error':
raise AnsibleError(msg)
# no need to write 'ignore' state
return results
def list_hosts(self, pattern="all"):
""" return a list of hostnames for a pattern """
# FIXME: cache?
result = self.get_hosts(pattern)
# allow implicit localhost if pattern matches and no other results
if len(result) == 0 and pattern in C.LOCALHOST:
result = [pattern]
return result
def list_groups(self):
# FIXME: cache?
return sorted(self._inventory.groups.keys())
def restrict_to_hosts(self, restriction):
"""
        Restrict list operations to the hosts given in restriction. This is used
        to batch serial operations in the main playbook code; don't use it for
        other purposes.
"""
if restriction is None:
return
elif not isinstance(restriction, list):
restriction = [restriction]
self._restriction = set(to_text(h.name) for h in restriction)
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
        pattern, such as to select a given geographic or numeric slice from
        a previous 'hosts' selection that only selects roles, or vice versa.
        Corresponds to the --limit parameter of ansible-playbook.
"""
if subset_pattern is None:
self._subset = None
else:
subset_patterns = split_host_pattern(subset_pattern)
results = []
# allow Unix style @filename data
for x in subset_patterns:
if not x:
continue
if x[0] == "@":
b_limit_file = to_bytes(x[1:])
if not os.path.exists(b_limit_file):
raise AnsibleError(u'Unable to find limit file %s' % b_limit_file)
if not os.path.isfile(b_limit_file):
raise AnsibleError(u'Limit starting with "@" must be a file, not a directory: %s' % b_limit_file)
with open(b_limit_file) as fd:
results.extend([to_text(l.strip()) for l in fd.read().split("\n")])
else:
results.append(to_text(x))
self._subset = results
def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def clear_pattern_cache(self):
self._pattern_cache = {}
def add_dynamic_host(self, host_info, result_item):
"""
Helper function to add a new host to inventory based on a task result.
"""
changed = False
if not result_item.get('refresh'):
self._cached_dynamic_hosts.append(host_info)
if host_info:
host_name = host_info.get('host_name')
# Check if host in inventory, add if not
if host_name not in self.hosts:
self.add_host(host_name, 'all')
changed = True
new_host = self.hosts.get(host_name)
# Set/update the vars for this host
new_host_vars = new_host.get_vars()
new_host_combined_vars = combine_vars(new_host_vars, host_info.get('host_vars', dict()))
if new_host_vars != new_host_combined_vars:
new_host.vars = new_host_combined_vars
changed = True
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if group_name not in self.groups:
group_name = self._inventory.add_group(group_name)
changed = True
new_group = self.groups[group_name]
if new_group.add_host(self.hosts[host_name]):
changed = True
# reconcile inventory, ensures inventory rules are followed
if changed:
self.reconcile_inventory()
result_item['changed'] = changed
def add_dynamic_group(self, host, result_item):
"""
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
"""
changed = False
if not result_item.get('refresh'):
self._cached_dynamic_grouping.append((host, result_item))
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
real_host = self.hosts.get(host.name)
if real_host is None:
if host.name == self.localhost.name:
real_host = self.localhost
elif not result_item.get('refresh'):
raise AnsibleError('%s cannot be matched in inventory' % host.name)
else:
# host was removed from inventory during refresh, we should not process
return
group_name = result_item.get('add_group')
parent_group_names = result_item.get('parent_groups', [])
if group_name not in self.groups:
group_name = self.add_group(group_name)
for name in parent_group_names:
if name not in self.groups:
# create the new group and add it to inventory
self.add_group(name)
changed = True
group = self._inventory.groups[group_name]
for parent_group_name in parent_group_names:
parent_group = self.groups[parent_group_name]
new = parent_group.add_child_group(group)
if new and not changed:
changed = True
if real_host not in group.get_hosts():
changed = group.add_host(real_host)
if group not in real_host.get_groups():
changed = real_host.add_group(group)
if changed:
self.reconcile_inventory()
result_item['changed'] = changed
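# --- illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of driving InventoryManager directly; it assumes
# the default 'host_list' inventory plugin is enabled and uses made-up hosts.
if __name__ == '__main__':
    from ansible.parsing.dataloader import DataLoader
    im = InventoryManager(loader=DataLoader(), sources=['web1,web2,db1'])
    print([h.name for h in im.get_hosts('all')])    # all three hosts
    print([h.name for h in im.get_hosts('web*')])   # glob match: ['web1', 'web2']
    im.subset('web1')                               # same effect as --limit web1
    print([h.name for h in im.get_hosts('all')])    # ['web1']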
| 29,454
|
Python
|
.py
| 608
| 36.827303
| 148
| 0.592935
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,042
|
helpers.py
|
ansible_ansible/lib/ansible/inventory/helpers.py
|
# (c) 2017, Ansible by RedHat Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import annotations
from ansible.utils.vars import combine_vars
def sort_groups(groups):
return sorted(groups, key=lambda g: (g.depth, g.priority, g.name))
def get_group_vars(groups):
"""
Combine all the group vars from a list of inventory groups.
:param groups: list of ansible.inventory.group.Group objects
:rtype: dict
"""
results = {}
for group in sort_groups(groups):
results = combine_vars(results, group.get_vars())
return results
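# --- illustrative usage sketch (not part of the original file) ---
# A hedged demonstration of the depth-based merge order; group names are made up.
if __name__ == '__main__':
    from ansible.inventory.group import Group
    parent, child = Group('parent'), Group('child')
    parent.add_child_group(child)            # child.depth becomes 1
    parent.set_variable('x', 'from_parent')
    child.set_variable('x', 'from_child')
    # sort_groups() orders by (depth, priority, name), so deeper groups
    # are merged later and their values win:
    print(get_group_vars([parent, child]))   # {'x': 'from_child'}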
| 1,240
|
Python
|
.py
| 31
| 37.322581
| 70
| 0.717735
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,043
|
data.py
|
ansible_ansible/lib/ansible/inventory/data.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import annotations
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.module_utils.six import string_types
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
from ansible.utils.path import basedir
display = Display()
class InventoryData(object):
"""
Holds inventory data (host and group objects).
    Using its methods should guarantee expected relationships and data.
"""
def __init__(self):
self.groups = {}
self.hosts = {}
# provides 'groups' magic var, host object has group_names
self._groups_dict_cache = {}
# current localhost, implicit or explicit
self.localhost = None
self.current_source = None
self.processed_sources = []
# Always create the 'all' and 'ungrouped' groups,
for group in ('all', 'ungrouped'):
self.add_group(group)
self.add_child('all', 'ungrouped')
def serialize(self):
self._groups_dict_cache = None
data = {
'groups': self.groups,
'hosts': self.hosts,
'local': self.localhost,
'source': self.current_source,
'processed_sources': self.processed_sources
}
return data
def deserialize(self, data):
self._groups_dict_cache = {}
self.hosts = data.get('hosts')
self.groups = data.get('groups')
self.localhost = data.get('local')
self.current_source = data.get('source')
self.processed_sources = data.get('processed_sources')
def _create_implicit_localhost(self, pattern):
if self.localhost:
new_host = self.localhost
else:
new_host = Host(pattern)
new_host.address = "127.0.0.1"
new_host.implicit = True
# set localhost defaults
py_interp = sys.executable
if not py_interp:
# sys.executable is not set in some cornercases. see issue #13585
py_interp = '/usr/bin/python'
display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. '
'You can correct this by setting ansible_python_interpreter for localhost')
new_host.set_variable("ansible_python_interpreter", py_interp)
new_host.set_variable("ansible_connection", 'local')
self.localhost = new_host
return new_host
def reconcile_inventory(self):
""" Ensure inventory basic rules, run after updates """
display.debug('Reconcile groups and hosts in inventory.')
self.current_source = None
group_names = set()
# set group vars from group_vars/ files and vars plugins
for g in self.groups:
group = self.groups[g]
group_names.add(group.name)
# ensure all groups inherit from 'all'
if group.name != 'all' and not group.get_ancestors():
self.add_child('all', group.name)
host_names = set()
# get host vars from host_vars/ files and vars plugins
for host in self.hosts.values():
host_names.add(host.name)
mygroups = host.get_groups()
if self.groups['ungrouped'] in mygroups:
# clear ungrouped of any incorrectly stored by parser
if set(mygroups).difference(set([self.groups['all'], self.groups['ungrouped']])):
self.groups['ungrouped'].remove_host(host)
elif not host.implicit:
# add ungrouped hosts to ungrouped, except implicit
length = len(mygroups)
if length == 0 or (length == 1 and self.groups['all'] in mygroups):
self.add_child('ungrouped', host.name)
# special case for implicit hosts
if host.implicit:
host.vars = combine_vars(self.groups['all'].get_vars(), host.vars)
# warn if overloading identifier as both group and host
for conflict in group_names.intersection(host_names):
display.warning("Found both group and host with same name: %s" % conflict)
self._groups_dict_cache = {}
def get_host(self, hostname):
""" fetch host object using name deal with implicit localhost """
matching_host = self.hosts.get(hostname, None)
# if host is not in hosts dict
if matching_host is None and hostname in C.LOCALHOST:
# might need to create implicit localhost
matching_host = self._create_implicit_localhost(hostname)
return matching_host
def add_group(self, group):
""" adds a group to inventory if not there already, returns named actually used """
if group:
if not isinstance(group, string_types):
raise AnsibleError("Invalid group name supplied, expected a string but got %s for %s" % (type(group), group))
if group not in self.groups:
g = Group(group)
if g.name not in self.groups:
self.groups[g.name] = g
self._groups_dict_cache = {}
display.debug("Added group %s to inventory" % group)
group = g.name
else:
display.debug("group %s already in inventory" % group)
else:
raise AnsibleError("Invalid empty/false group name provided: %s" % group)
return group
def remove_group(self, group):
if group in self.groups:
del self.groups[group]
display.debug("Removed group %s from inventory" % group)
self._groups_dict_cache = {}
for host in self.hosts:
h = self.hosts[host]
h.remove_group(group)
def add_host(self, host, group=None, port=None):
""" adds a host to inventory and possibly a group if not there already """
if host:
if not isinstance(host, string_types):
raise AnsibleError("Invalid host name supplied, expected a string but got %s for %s" % (type(host), host))
# TODO: add to_safe_host_name
g = None
if group:
if group in self.groups:
g = self.groups[group]
else:
raise AnsibleError("Could not find group %s in inventory" % group)
if host not in self.hosts:
h = Host(host, port)
self.hosts[host] = h
if self.current_source: # set to 'first source' in which host was encountered
self.set_variable(host, 'inventory_file', self.current_source)
self.set_variable(host, 'inventory_dir', basedir(self.current_source))
else:
self.set_variable(host, 'inventory_file', None)
self.set_variable(host, 'inventory_dir', None)
display.debug("Added host %s to inventory" % (host))
# set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
if host in C.LOCALHOST:
if self.localhost is None:
self.localhost = self.hosts[host]
display.vvvv("Set default localhost to %s" % h)
else:
display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
else:
h = self.hosts[host]
if g:
g.add_host(h)
self._groups_dict_cache = {}
display.debug("Added host %s to group %s" % (host, group))
else:
raise AnsibleError("Invalid empty host name provided: %s" % host)
return host
def remove_host(self, host):
if host.name in self.hosts:
del self.hosts[host.name]
for group in self.groups:
g = self.groups[group]
g.remove_host(host)
def set_variable(self, entity, varname, value):
""" sets a variable for an inventory object """
if entity in self.groups:
inv_object = self.groups[entity]
elif entity in self.hosts:
inv_object = self.hosts[entity]
else:
raise AnsibleError("Could not identify group or host named %s" % entity)
inv_object.set_variable(varname, value)
display.debug('set %s for %s' % (varname, entity))
def add_child(self, group, child):
""" Add host or group to group """
added = False
if group in self.groups:
g = self.groups[group]
if child in self.groups:
added = g.add_child_group(self.groups[child])
elif child in self.hosts:
added = g.add_host(self.hosts[child])
else:
raise AnsibleError("%s is not a known host nor group" % child)
self._groups_dict_cache = {}
display.debug('Group %s now contains %s' % (group, child))
else:
raise AnsibleError("%s is not a known group" % group)
return added
def get_groups_dict(self):
"""
We merge a 'magic' var 'groups' with group name keys and hostname list values into every host variable set. Cache for speed.
"""
if not self._groups_dict_cache:
for (group_name, group) in self.groups.items():
self._groups_dict_cache[group_name] = [h.name for h in group.get_hosts()]
return self._groups_dict_cache
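# --- illustrative usage sketch (not part of the original file) ---
# A minimal, hedged walk-through of InventoryData; names are made up.
if __name__ == '__main__':
    inv = InventoryData()                        # starts with 'all' and 'ungrouped'
    inv.add_group('web')
    inv.add_host('web1', group='web', port=2222)
    inv.reconcile_inventory()                    # parents 'web' under 'all'
    print(inv.get_groups_dict())                 # {'all': ['web1'], 'ungrouped': [], 'web': ['web1']}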
| 10,586
|
Python
|
.py
| 225
| 35.875556
| 147
| 0.597244
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,044
|
group.py
|
ansible_ansible/lib/ansible/inventory/group.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from collections.abc import Mapping, MutableMapping
from enum import Enum
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
def to_safe_group_name(name, replacer="_", force=False, silent=False):
    # Converts 'bad' characters in a string to underscores (or the provided replacer) so it can be used as an Ansible host or group name
warn = ''
if name: # when deserializing we might not have name yet
invalid_chars = C.INVALID_VARIABLE_NAMES.findall(name)
if invalid_chars:
msg = 'invalid character(s) "%s" in group name (%s)' % (to_text(set(invalid_chars)), to_text(name))
if C.TRANSFORM_INVALID_GROUP_CHARS not in ('never', 'ignore') or force:
name = C.INVALID_VARIABLE_NAMES.sub(replacer, name)
if not (silent or C.TRANSFORM_INVALID_GROUP_CHARS == 'silently'):
display.vvvv('Replacing ' + msg)
warn = 'Invalid characters were found in group names and automatically replaced, use -vvvv to see details'
else:
if C.TRANSFORM_INVALID_GROUP_CHARS == 'never':
display.vvvv('Not replacing %s' % msg)
warn = 'Invalid characters were found in group names but not replaced, use -vvvv to see details'
if warn:
display.warning(warn)
return name
class InventoryObjectType(Enum):
HOST = 0
GROUP = 1
class Group:
""" a group of ansible hosts """
base_type = InventoryObjectType.GROUP
# __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
self.depth = 0
self.name = to_safe_group_name(name)
self.hosts = []
self._hosts = None
self.vars = {}
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
self.priority = 1
def __repr__(self):
return self.get_name()
def __str__(self):
return self.get_name()
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def serialize(self):
parent_groups = []
for parent in self.parent_groups:
parent_groups.append(parent.serialize())
self._hosts = None
result = dict(
name=self.name,
vars=self.vars.copy(),
parent_groups=parent_groups,
depth=self.depth,
hosts=self.hosts,
)
return result
def deserialize(self, data):
self.__init__() # used by __setstate__ to deserialize in place # pylint: disable=unnecessary-dunder-call
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.depth = data.get('depth', 0)
self.hosts = data.get('hosts', [])
self._hosts = None
parent_groups = data.get('parent_groups', [])
for parent_data in parent_groups:
g = Group()
g.deserialize(parent_data)
self.parent_groups.append(g)
def _walk_relationship(self, rel, include_self=False, preserve_ordering=False):
"""
Given `rel` that is an iterable property of Group,
        constituting a directed acyclic graph among all groups,
        returns a set of all groups in the full tree.
A B C
| / | /
| / | /
D -> E
| / vertical connections
| / are directed upward
F
Called on F, returns set of (A, B, C, D, E)
"""
seen = set([])
unprocessed = set(getattr(self, rel))
if include_self:
unprocessed.add(self)
if preserve_ordering:
ordered = [self] if include_self else []
ordered.extend(getattr(self, rel))
while unprocessed:
seen.update(unprocessed)
new_unprocessed = set([])
for new_item in chain.from_iterable(getattr(g, rel) for g in unprocessed):
new_unprocessed.add(new_item)
if preserve_ordering:
if new_item not in seen:
ordered.append(new_item)
new_unprocessed.difference_update(seen)
unprocessed = new_unprocessed
if preserve_ordering:
return ordered
return seen
def get_ancestors(self):
return self._walk_relationship('parent_groups')
def get_descendants(self, **kwargs):
return self._walk_relationship('child_groups', **kwargs)
@property
def host_names(self):
if self._hosts is None:
self._hosts = set(self.hosts)
return self._hosts
def get_name(self):
return self.name
def add_child_group(self, group):
added = False
if self == group:
raise Exception("can't add group to itself")
# don't add if it's already there
if group not in self.child_groups:
# prepare list of group's new ancestors this edge creates
start_ancestors = group.get_ancestors()
new_ancestors = self.get_ancestors()
if group in new_ancestors:
raise AnsibleError("Adding group '%s' as child to '%s' creates a recursive dependency loop." % (to_native(group.name), to_native(self.name)))
new_ancestors.add(self)
new_ancestors.difference_update(start_ancestors)
added = True
self.child_groups.append(group)
# update the depth of the child
group.depth = max([self.depth + 1, group.depth])
# update the depth of the grandchildren
group._check_children_depth()
# now add self to child's parent_groups list, but only if there
# isn't already a group with the same name
if self.name not in [g.name for g in group.parent_groups]:
group.parent_groups.append(self)
for h in group.get_hosts():
h.populate_ancestors(additions=new_ancestors)
self.clear_hosts_cache()
return added
def _check_children_depth(self):
depth = self.depth
start_depth = self.depth # self.depth could change over loop
seen = set([])
unprocessed = set(self.child_groups)
while unprocessed:
seen.update(unprocessed)
depth += 1
to_process = unprocessed.copy()
unprocessed = set([])
for g in to_process:
if g.depth < depth:
g.depth = depth
unprocessed.update(g.child_groups)
if depth - start_depth > len(seen):
raise AnsibleError("The group named '%s' has a recursive dependency loop." % to_native(self.name))
def add_host(self, host):
added = False
if host.name not in self.host_names:
self.hosts.append(host)
self._hosts.add(host.name)
host.add_group(self)
self.clear_hosts_cache()
added = True
return added
def remove_host(self, host):
removed = False
if host.name in self.host_names:
self.hosts.remove(host)
self._hosts.remove(host.name)
host.remove_group(self)
self.clear_hosts_cache()
removed = True
return removed
def set_variable(self, key, value):
if key == 'ansible_group_priority':
self.set_priority(int(value))
else:
if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
self.vars = combine_vars(self.vars, {key: value})
else:
self.vars[key] = value
def clear_hosts_cache(self):
self._hosts_cache = None
for g in self.get_ancestors():
g._hosts_cache = None
def get_hosts(self):
if self._hosts_cache is None:
self._hosts_cache = self._get_hosts()
return self._hosts_cache
def _get_hosts(self):
hosts = []
seen = {}
for kid in self.get_descendants(include_self=True, preserve_ordering=True):
kid_hosts = kid.hosts
for kk in kid_hosts:
if kk not in seen:
seen[kk] = 1
if self.name == 'all' and kk.implicit:
continue
hosts.append(kk)
return hosts
def get_vars(self):
return self.vars.copy()
def set_priority(self, priority):
try:
self.priority = int(priority)
        except (TypeError, ValueError):
# FIXME: warn about invalid priority
pass
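# --- illustrative usage sketch (not part of the original file) ---
# A small, hedged demonstration of group nesting; names mirror the
# diagram in _walk_relationship() but are otherwise made up.
if __name__ == '__main__':
    a, d, f = Group('a'), Group('d'), Group('f')
    a.add_child_group(d)
    d.add_child_group(f)
    print(sorted(g.name for g in f.get_ancestors()))   # ['a', 'd']
    print(f.depth)                                     # 2: two edges below 'a'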
| 9,752
|
Python
|
.py
| 237
| 31.21519
| 157
| 0.597378
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,045
|
fact_cache.py
|
ansible_ansible/lib/ansible/vars/fact_cache.py
|
# Copyright: (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from collections.abc import MutableMapping
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.loader import cache_loader
from ansible.utils.display import Display
display = Display()
class FactCache(MutableMapping):
def __init__(self, *args, **kwargs):
self._plugin = cache_loader.get(C.CACHE_PLUGIN)
if not self._plugin:
raise AnsibleError('Unable to load the facts cache plugin (%s).' % (C.CACHE_PLUGIN))
super(FactCache, self).__init__(*args, **kwargs)
def __getitem__(self, key):
if not self._plugin.contains(key):
raise KeyError
return self._plugin.get(key)
def __setitem__(self, key, value):
self._plugin.set(key, value)
def __delitem__(self, key):
self._plugin.delete(key)
def __contains__(self, key):
return self._plugin.contains(key)
def __iter__(self):
return iter(self._plugin.keys())
def __len__(self):
return len(self._plugin.keys())
def copy(self):
""" Return a primitive copy of the keys and values from the cache. """
return dict(self)
def keys(self):
return self._plugin.keys()
def flush(self):
""" Flush the fact cache of all keys. """
self._plugin.flush()
def first_order_merge(self, key, value):
host_facts = {key: value}
try:
host_cache = self._plugin.get(key)
if host_cache:
host_cache.update(value)
host_facts[key] = host_cache
except KeyError:
pass
super(FactCache, self).update(host_facts)
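# --- illustrative usage sketch (not part of the original file) ---
# A hedged example assuming the default in-memory cache plugin
# (C.CACHE_PLUGIN == 'memory'); the host name and facts are made up.
if __name__ == '__main__':
    cache = FactCache()
    cache['host1'] = {'os': 'linux'}
    cache.first_order_merge('host1', {'cpu': 4})   # shallow, first-level merge
    print(cache['host1'])                          # {'os': 'linux', 'cpu': 4}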
| 1,903
|
Python
|
.py
| 48
| 32.166667
| 96
| 0.635371
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,046
|
plugins.py
|
ansible_ansible/lib/ansible/vars/plugins.py
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
from functools import lru_cache
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.group import InventoryObjectType
from ansible.plugins.loader import vars_loader
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
def _prime_vars_loader():
# find 3rd party legacy vars plugins once, and look them up by name subsequently
list(vars_loader.all(class_only=True))
for plugin_name in C.VARIABLE_PLUGINS_ENABLED:
if not plugin_name:
continue
vars_loader.get(plugin_name)
def get_plugin_vars(loader, plugin, path, entities):
data = {}
try:
data = plugin.get_vars(loader, path, entities)
except AttributeError:
if hasattr(plugin, 'get_host_vars') or hasattr(plugin, 'get_group_vars'):
display.deprecated(
f"The vars plugin {plugin.ansible_name} from {plugin._original_path} is relying "
"on the deprecated entrypoints 'get_host_vars' and 'get_group_vars'. "
"This plugin should be updated to inherit from BaseVarsPlugin and define "
"a 'get_vars' method as the main entrypoint instead.",
version="2.20",
)
try:
for entity in entities:
if entity.base_type is InventoryObjectType.HOST:
data |= plugin.get_host_vars(entity.name)
else:
data |= plugin.get_group_vars(entity.name)
except AttributeError:
if hasattr(plugin, 'run'):
raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
else:
raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
return data
# optimized for stateless plugins; non-stateless plugin instances will fall out quickly
@lru_cache(maxsize=10)
def _plugin_should_run(plugin, stage):
# if a plugin-specific setting has not been provided, use the global setting
# older/non shipped plugins that don't support the plugin-specific setting should also use the global setting
allowed_stages = None
try:
allowed_stages = plugin.get_option('stage')
except (AttributeError, KeyError):
pass
if allowed_stages:
return allowed_stages in ('all', stage)
# plugin didn't declare a preference; consult global config
config_stage_override = C.RUN_VARS_PLUGINS
if config_stage_override == 'demand' and stage == 'inventory':
return False
elif config_stage_override == 'start' and stage == 'task':
return False
return True
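# --- illustrative sketch (not part of the original file) ---
# Decision table, assuming the plugin declares no 'stage' option:
#   C.RUN_VARS_PLUGINS == 'demand' -> run at 'task' time only (skip 'inventory')
#   C.RUN_VARS_PLUGINS == 'start'  -> run at 'inventory' time only (skip 'task')
#   anything else                  -> run at both stages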
def get_vars_from_path(loader, path, entities, stage):
data = {}
if vars_loader._paths is None:
# cache has been reset, reload all()
_prime_vars_loader()
for plugin_name in vars_loader._plugin_instance_cache:
if (plugin := vars_loader.get(plugin_name)) is None:
continue
collection = '.' in plugin.ansible_name and not plugin.ansible_name.startswith('ansible.builtin.')
# Warn if a collection plugin has REQUIRES_ENABLED because it has no effect.
if collection and hasattr(plugin, 'REQUIRES_ENABLED'):
display.warning(
"Vars plugins in collections must be enabled to be loaded, REQUIRES_ENABLED is not supported. "
"This should be removed from the plugin %s." % plugin.ansible_name
)
if not _plugin_should_run(plugin, stage):
continue
if (new_vars := get_plugin_vars(loader, plugin, path, entities)) != {}:
data = combine_vars(data, new_vars)
return data
def get_vars_from_inventory_sources(loader, sources, entities, stage):
data = {}
for path in sources:
if path is None:
continue
if ',' in path and not os.path.exists(path): # skip host lists
continue
elif not os.path.isdir(path):
# always pass the directory of the inventory source file
path = os.path.dirname(path)
if (new_vars := get_vars_from_path(loader, path, entities, stage)) != {}:
data = combine_vars(data, new_vars)
return data
| 4,503
|
Python
|
.py
| 96
| 38.40625
| 124
| 0.657913
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,047
|
hostvars.py
|
ansible_ansible/lib/ansible/vars/hostvars.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from collections.abc import Mapping
from functools import cached_property
from ansible import constants as C
from ansible.template import Templar, AnsibleUndefined
__all__ = ['HostVars', 'HostVarsVars']
# Note -- this is a Mapping, not a MutableMapping
class HostVars(Mapping):
""" A special view of vars_cache that adds values from the inventory when needed. """
def __init__(self, inventory, variable_manager, loader):
self._inventory = inventory
self._loader = loader
self._variable_manager = variable_manager
variable_manager._hostvars = self
def set_variable_manager(self, variable_manager):
self._variable_manager = variable_manager
variable_manager._hostvars = self
def set_inventory(self, inventory):
self._inventory = inventory
def _find_host(self, host_name):
# does not use inventory.hosts so it can create localhost on demand
return self._inventory.get_host(host_name)
def raw_get(self, host_name):
"""
Similar to __getitem__, however the returned data is not run through
the templating engine to expand variables in the hostvars.
"""
host = self._find_host(host_name)
if host is None:
return AnsibleUndefined(name="hostvars['%s']" % host_name)
return self._variable_manager.get_vars(host=host, include_hostvars=False)
def __setstate__(self, state):
self.__dict__.update(state)
# Methods __getstate__ and __setstate__ of VariableManager do not
# preserve _loader and _hostvars attributes to improve pickle
# performance and memory utilization. Since HostVars holds values
# of those attributes already, assign them if needed.
if self._variable_manager._loader is None:
self._variable_manager._loader = self._loader
if self._variable_manager._hostvars is None:
self._variable_manager._hostvars = self
def __getitem__(self, host_name):
data = self.raw_get(host_name)
if isinstance(data, AnsibleUndefined):
return data
return HostVarsVars(data, loader=self._loader)
def set_host_variable(self, host, varname, value):
self._variable_manager.set_host_variable(host, varname, value)
def set_nonpersistent_facts(self, host, facts):
self._variable_manager.set_nonpersistent_facts(host, facts)
def set_host_facts(self, host, facts):
self._variable_manager.set_host_facts(host, facts)
def __contains__(self, host_name):
# does not use inventory.hosts so it can create localhost on demand
return self._find_host(host_name) is not None
def __iter__(self):
yield from self._inventory.hosts
def __len__(self):
return len(self._inventory.hosts)
def __repr__(self):
out = {}
for host in self._inventory.hosts:
out[host] = self.get(host)
return repr(out)
def __deepcopy__(self, memo):
# We do not need to deepcopy because HostVars is immutable,
# however we have to implement the method so we can deepcopy
# variables' dicts that contain HostVars.
return self
class HostVarsVars(Mapping):
def __init__(self, variables, loader):
self._vars = variables
self._loader = loader
@cached_property
def _templar(self):
# NOTE: this only has access to the host's own vars,
# so templates that depend on vars in other scopes will not work.
return Templar(variables=self._vars, loader=self._loader)
def __getitem__(self, var):
return self._templar.template(self._vars[var], fail_on_undefined=False, static_vars=C.INTERNAL_STATIC_VARS)
def __contains__(self, var):
return (var in self._vars)
def __iter__(self):
yield from self._vars.keys()
def __len__(self):
        return len(self._vars)
def __repr__(self):
return repr(self._templar.template(self._vars, fail_on_undefined=False, static_vars=C.INTERNAL_STATIC_VARS))
def __getstate__(self):
""" override serialization here to avoid
pickle issues with templar and Jinja native"""
state = self.__dict__.copy()
state.pop('_templar', None)
return state
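# Illustrative sketch (not part of the original module): HostVars is a
# read-only Mapping keyed by host name. Assuming hypothetical, already
# initialized `inventory`, `variable_manager` and `loader` objects:
#
#   hostvars = HostVars(inventory, variable_manager, loader)
#   web = hostvars['web01']            # HostVarsVars; values are templated lazily
#   raw = hostvars.raw_get('web01')    # same data, but left untemplated
#   'web01' in hostvars                # True if the inventory can resolve the host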
size: 5,070 | language: Python | extension: .py | total_lines: 110 | avg_line_length: 39.281818 | max_line_length: 116 | alphanum_fraction: 0.678173 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,048 | file_name: manager.py | file_path: ansible_ansible/lib/ansible/vars/manager.py
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import sys
from collections import defaultdict
from collections.abc import Mapping, MutableMapping, Sequence
from hashlib import sha1
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError
from ansible.inventory.host import Host
from ansible.inventory.helpers import sort_groups, get_group_vars
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import text_type
from ansible.vars.fact_cache import FactCache
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars, load_extra_vars, load_options_vars
from ansible.utils.unsafe_proxy import wrap_var
from ansible.vars.clean import namespace_facts, clean_facts
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
display = Display()
def preprocess_vars(a):
"""
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
"""
if a is None:
return None
elif not isinstance(a, list):
data = [a]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
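# Illustrative examples (not part of the original module) of the normalization
# preprocess_vars() performs:
#
#   preprocess_vars(None)         -> None
#   preprocess_vars({'a': 1})     -> [{'a': 1}]
#   preprocess_vars([{'a': 1}])   -> [{'a': 1}]
#   preprocess_vars(['a'])        -> raises AnsibleError (items must be mappings)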
class VariableManager:
_ALLOWED = frozenset(['plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory',
'all_plugins_play', 'all_plugins_inventory', 'all_inventory'])
def __init__(self, loader=None, inventory=None, version_info=None):
self._nonpersistent_fact_cache = defaultdict(dict)
self._vars_cache = defaultdict(dict)
self._extra_vars = defaultdict(dict)
self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
self._inventory = inventory
self._loader = loader
self._hostvars = None
self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
self._options_vars = load_options_vars(version_info)
# If the basedir is specified as the empty string then it results in cwd being used.
# This is not a safe location to load vars from.
basedir = self._options_vars.get('basedir', False)
self.safe_basedir = bool(basedir is False or basedir)
# load extra vars
self._extra_vars = load_extra_vars(loader=self._loader)
# load fact cache
try:
self._fact_cache = FactCache()
except AnsibleError as e:
            # a bad cache plugin is not a fatal error
            # fall back to a dict as an in-memory cache
display.warning(to_text(e))
self._fact_cache = {}
def __getstate__(self):
data = dict(
fact_cache=self._fact_cache,
np_fact_cache=self._nonpersistent_fact_cache,
vars_cache=self._vars_cache,
extra_vars=self._extra_vars,
host_vars_files=self._host_vars_files,
group_vars_files=self._group_vars_files,
omit_token=self._omit_token,
options_vars=self._options_vars,
inventory=self._inventory,
safe_basedir=self.safe_basedir,
)
return data
def __setstate__(self, data):
self._fact_cache = data.get('fact_cache', defaultdict(dict))
self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
self._vars_cache = data.get('vars_cache', defaultdict(dict))
self._extra_vars = data.get('extra_vars', dict())
self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
self._inventory = data.get('inventory', None)
self._options_vars = data.get('options_vars', dict())
self.safe_basedir = data.get('safe_basedir', False)
self._loader = None
self._hostvars = None
@property
def extra_vars(self):
return self._extra_vars
def set_inventory(self, inventory):
self._inventory = inventory
def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=False, use_cache=True,
_hosts=None, _hosts_all=None, stage='task'):
"""
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
sets of variables being returned due to the additional context).
The order of precedence is:
- play->roles->get_default_vars (if there is a play context)
- group_vars_files[host] (if there is a host context)
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
- vars_cache[host] (if there is a host context)
- extra vars
``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying
on the functionality they provide. These arguments may be removed at a later date without a deprecation
period and without warning.
"""
if include_delegate_to:
        display.deprecated(
            "`VariableManager.get_vars`'s argument `include_delegate_to` no longer has any effect.",
version="2.19",
)
display.debug("in VariableManager get_vars()")
all_vars = dict()
magic_variables = self._get_magic_variables(
play=play,
host=host,
task=task,
include_hostvars=include_hostvars,
_hosts=_hosts,
_hosts_all=_hosts_all,
)
_vars_sources = {}
def _combine_and_track(data, new_data, source):
"""
Wrapper function to update var sources dict and call combine_vars()
See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
"""
if new_data == {}:
return data
if C.DEFAULT_DEBUG:
# Populate var sources dict
for key in new_data:
_vars_sources[key] = source
return combine_vars(data, new_data)
# default for all cases
basedirs = []
if self.safe_basedir: # avoid adhoc/console loading cwd
basedirs = [self._loader.get_basedir()]
if play:
# get role defaults (lowest precedence)
for role in play.roles:
if role.public:
all_vars = _combine_and_track(all_vars, role.get_default_vars(), "role '%s' defaults" % role.name)
if task:
# set basedirs
if C.PLAYBOOK_VARS_ROOT == 'all': # should be default
basedirs = task.get_search_path()
elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'): # only option in 2.4.0
basedirs = [task.get_search_path()[0]]
elif C.PLAYBOOK_VARS_ROOT != 'top':
# preserves default basedirs, only option pre 2.3
raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)
# if we have a task in this context, and that task has a role, make
# sure it sees its defaults above any other roles, as we previously
# (v1) made sure each task had a copy of its roles default vars
# TODO: investigate why we need play or include_role check?
if task._role is not None and (play or task.action in C._ACTION_INCLUDE_ROLE):
all_vars = _combine_and_track(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()), "role '%s' defaults" % task._role.name)
if host:
            # the 'all' group and the rest of the groups for a host, used below
all_group = self._inventory.groups.get('all')
host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])
def _get_plugin_vars(plugin, path, entities):
data = {}
try:
data = plugin.get_vars(self._loader, path, entities)
except AttributeError:
try:
for entity in entities:
if isinstance(entity, Host):
data |= plugin.get_host_vars(entity.name)
else:
data |= plugin.get_group_vars(entity.name)
except AttributeError:
if hasattr(plugin, 'run'):
raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
else:
raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
return data
# internal functions that actually do the work
def _plugins_inventory(entities):
""" merges all entities by inventory source """
return get_vars_from_inventory_sources(self._loader, self._inventory._sources, entities, stage)
def _plugins_play(entities):
""" merges all entities adjacent to play """
data = {}
for path in basedirs:
data = _combine_and_track(data, get_vars_from_path(self._loader, path, entities, stage), "path '%s'" % path)
return data
# configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
def all_inventory():
return all_group.get_vars()
def all_plugins_inventory():
return _plugins_inventory([all_group])
def all_plugins_play():
return _plugins_play([all_group])
def groups_inventory():
""" gets group vars from inventory """
return get_group_vars(host_groups)
def groups_plugins_inventory():
""" gets plugin sources from inventory for groups """
return _plugins_inventory(host_groups)
def groups_plugins_play():
""" gets plugin sources from play for groups """
return _plugins_play(host_groups)
            def plugins_by_group():
                """
                Merges all plugin sources by group.
                This should be used instead of, NOT in combination with, the other groups_plugins* functions.
                """
                data = {}
                for group in host_groups:
                    data[group] = _combine_and_track(data.get(group, {}), _plugins_inventory([group]), "inventory group_vars for '%s'" % group)
                    data[group] = _combine_and_track(data[group], _plugins_play([group]), "playbook group_vars for '%s'" % group)
                return data
# Merge groups as per precedence config
# only allow to call the functions we want exposed
for entry in C.VARIABLE_PRECEDENCE:
if entry in self._ALLOWED:
display.debug('Calling %s to load vars for %s' % (entry, host.name))
all_vars = _combine_and_track(all_vars, locals()[entry](), "group vars, precedence entry '%s'" % entry)
else:
display.warning('Ignoring unknown variable precedence entry: %s' % (entry))
# host vars, from inventory, inventory adjacent and play adjacent via plugins
all_vars = _combine_and_track(all_vars, host.get_vars(), "host vars for '%s'" % host)
all_vars = _combine_and_track(all_vars, _plugins_inventory([host]), "inventory host_vars for '%s'" % host)
all_vars = _combine_and_track(all_vars, _plugins_play([host]), "playbook host_vars for '%s'" % host)
# finally, the facts caches for this host, if it exists
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
try:
facts = wrap_var(self._fact_cache.get(host.name, {}))
all_vars |= namespace_facts(facts)
# push facts to main namespace
if C.INJECT_FACTS_AS_VARS:
all_vars = _combine_and_track(all_vars, wrap_var(clean_facts(facts)), "facts")
else:
# always 'promote' ansible_local
all_vars = _combine_and_track(all_vars, wrap_var({'ansible_local': facts.get('ansible_local', {})}), "facts")
except KeyError:
pass
if play:
all_vars = _combine_and_track(all_vars, play.get_vars(), "play vars")
vars_files = play.get_vars_files()
try:
for vars_file_item in vars_files:
# create a set of temporary vars here, which incorporate the extra
# and magic vars so we can properly template the vars_files entries
# NOTE: this makes them depend on host vars/facts so things like
# ansible_facts['os_distribution'] can be used, ala include_vars.
# Consider DEPRECATING this in the future, since we have include_vars ...
temp_vars = combine_vars(all_vars, self._extra_vars)
temp_vars = combine_vars(temp_vars, magic_variables)
templar = Templar(loader=self._loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
# support "conditional includes" for vars_files, which mimics
# the with_first_found mechanism.
vars_file_list = vars_file_item
if not isinstance(vars_file_list, list):
vars_file_list = [vars_file_list]
                    # now we iterate through the (potential) files, and break out
                    # as soon as we successfully read one from the list; candidates
                    # that cannot be found are silently skipped
try:
for vars_file in vars_file_list:
vars_file = templar.template(vars_file)
                            if not isinstance(vars_file, Sequence):
raise AnsibleError(
"Invalid vars_files entry found: %r\n"
"vars_files entries should be either a string type or "
"a list of string types after template expansion" % vars_file
)
try:
play_search_stack = play.get_search_path()
found_file = self._loader.path_dwim_relative_stack(play_search_stack, 'vars', vars_file)
data = preprocess_vars(self._loader.load_from_file(found_file, unsafe=True, cache='vaulted'))
if data is not None:
for item in data:
all_vars = _combine_and_track(all_vars, item, "play vars_files from '%s'" % vars_file)
break
except AnsibleFileNotFound:
# we continue on loader failures
continue
except AnsibleParserError:
raise
except (UndefinedError, AnsibleUndefinedVariable):
if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
% vars_file_item, obj=vars_file_item)
else:
# we do not have a full context here, and the missing variable could be because of that
# so just show a warning and continue
display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
continue
display.vvv("Read vars_file '%s'" % vars_file_item)
except TypeError:
raise AnsibleParserError("Error while reading vars files - please supply a list of file names. "
"Got '%s' of type %s" % (vars_files, type(vars_files)))
# We now merge in all exported vars from all roles in the play (very high precedence)
for role in play.roles:
if role.public:
all_vars = _combine_and_track(all_vars, role.get_vars(include_params=False, only_exports=True), "role '%s' exported vars" % role.name)
# next, we merge in the vars from the role, which will specifically
# follow the role dependency chain, and then we merge in the tasks
# vars (which will look at parent blocks/task includes)
if task:
if task._role:
all_vars = _combine_and_track(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False, only_exports=False),
"role '%s' all vars" % task._role.name)
all_vars = _combine_and_track(all_vars, task.get_vars(), "task vars")
# next, we merge in the vars cache (include vars) and nonpersistent
# facts cache (set_fact/register), in that order
if host:
# include_vars non-persistent cache
all_vars = _combine_and_track(all_vars, self._vars_cache.get(host.get_name(), dict()), "include_vars")
# fact non-persistent cache
all_vars = _combine_and_track(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()), "set_fact")
# next, we merge in role params and task include params
if task:
# special case for include tasks, where the include params
# may be specified in the vars field for the task, which should
# have higher precedence than the vars/np facts above
if task._role:
all_vars = _combine_and_track(all_vars, task._role.get_role_params(task.get_dep_chain()), "role params")
all_vars = _combine_and_track(all_vars, task.get_include_params(), "include params")
# extra vars
all_vars = _combine_and_track(all_vars, self._extra_vars, "extra vars")
# magic variables
all_vars = _combine_and_track(all_vars, magic_variables, "magic vars")
# special case for the 'environment' magic variable, as someone
# may have set it as a variable and we don't want to stomp on it
if task:
all_vars['environment'] = task.environment
# 'vars' magic var
if task or play:
# has to be copy, otherwise recursive ref
all_vars['vars'] = all_vars.copy()
display.debug("done with get_vars()")
if C.DEFAULT_DEBUG:
# Use VarsWithSources wrapper class to display var sources
return VarsWithSources.new_vars_with_sources(all_vars, _vars_sources)
else:
return all_vars
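    # Illustrative call (not part of the original module), assuming hypothetical
    # `play`, `host` and `task` objects obtained from the strategy layer:
    #
    #   task_vars = variable_manager.get_vars(play=play, host=host, task=task)
    #
    # The returned dict already reflects the precedence order documented above,
    # with extra vars and magic vars layered in last.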
def _get_magic_variables(self, play, host, task, include_hostvars, _hosts=None, _hosts_all=None):
"""
Returns a dictionary of so-called "magic" variables in Ansible,
which are special variables we set internally for use.
"""
variables = {}
variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
variables['ansible_playbook_python'] = sys.executable
variables['ansible_config_file'] = C.CONFIG_FILE
if play:
# using role_cache as play.roles only has 'public' roles for vars exporting
dependency_role_names = list({d.get_name() for r in play.roles for d in r.get_all_dependencies()})
play_role_names = [r.get_name() for r in play.roles]
# ansible_role_names includes all role names, dependent or directly referenced by the play
variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
# ansible_play_role_names includes the names of all roles directly referenced by this play
# roles that are implicitly referenced via dependencies are not listed.
variables['ansible_play_role_names'] = play_role_names
# ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
# dependencies that are also explicitly named as roles are included in this list
variables['ansible_dependent_role_names'] = dependency_role_names
# TODO: data tagging!!! DEPRECATED: role_names should be deprecated in favor of ansible_ prefixed ones
variables['role_names'] = variables['ansible_play_role_names']
variables['ansible_play_name'] = play.get_name()
if task:
if task._role:
variables['role_name'] = task._role.get_name(include_role_fqcn=False)
variables['role_path'] = task._role._role_path
variables['role_uuid'] = text_type(task._role._uuid)
variables['ansible_collection_name'] = task._role._role_collection
variables['ansible_role_name'] = task._role.get_name()
if self._inventory is not None:
variables['groups'] = self._inventory.get_groups_dict()
if play:
templar = Templar(loader=self._loader)
if not play.finalized and templar.is_template(play.hosts):
pattern = 'all'
else:
pattern = play.hosts or 'all'
# add the list of hosts in the play, as adjusted for limit/filters
if not _hosts_all:
_hosts_all = [h.name for h in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
if not _hosts:
_hosts = [h.name for h in self._inventory.get_hosts()]
variables['ansible_play_hosts_all'] = _hosts_all[:]
variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
variables['ansible_play_batch'] = [x for x in _hosts if x not in play._removed_hosts]
# DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
# however this would take work in the templating engine, so for now we'll add both
variables['play_hosts'] = variables['ansible_play_batch']
# the 'omit' value allows params to be left out if the variable they are based on is undefined
variables['omit'] = self._omit_token
# Set options vars
for option, option_value in self._options_vars.items():
variables[option] = option_value
if self._hostvars is not None and include_hostvars:
variables['hostvars'] = self._hostvars
return variables
def get_delegated_vars_and_hostname(self, templar, task, variables):
"""Get the delegated_vars for an individual task invocation, which may be in the context
of an individual loop iteration.
        Not used directly by VariableManager, but used primarily within TaskExecutor
"""
delegated_vars = {}
delegated_host_name = None
if task.delegate_to:
delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
# no need to do work if omitted
if delegated_host_name != self._omit_token:
if not delegated_host_name:
raise AnsibleError('Empty hostname produced from delegate_to: "%s"' % task.delegate_to)
delegated_host = self._inventory.get_host(delegated_host_name)
if delegated_host is None:
for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
# check if the address matches, or if both the delegated_to host
# and the current host are in the list of localhost aliases
if h.address == delegated_host_name:
delegated_host = h
break
else:
delegated_host = Host(name=delegated_host_name)
delegated_vars['ansible_delegated_vars'] = {
delegated_host_name: self.get_vars(
play=task.get_play(),
host=delegated_host,
task=task,
include_hostvars=True,
)
}
delegated_vars['ansible_delegated_vars'][delegated_host_name]['inventory_hostname'] = variables.get('inventory_hostname')
return delegated_vars, delegated_host_name
def clear_facts(self, hostname):
"""
Clears the facts for a host
"""
self._fact_cache.pop(hostname, None)
def set_host_facts(self, host, facts):
"""
Sets or updates the given facts for a host in the fact cache.
"""
if not isinstance(facts, Mapping):
raise AnsibleAssertionError("the type of 'facts' to set for host_facts should be a Mapping but is a %s" % type(facts))
try:
host_cache = self._fact_cache[host]
except KeyError:
# We get to set this as new
host_cache = facts
else:
if not isinstance(host_cache, MutableMapping):
raise TypeError('The object retrieved for {0} must be a MutableMapping but was'
' a {1}'.format(host, type(host_cache)))
# Update the existing facts
host_cache |= facts
# Save the facts back to the backing store
self._fact_cache[host] = host_cache
def set_nonpersistent_facts(self, host, facts):
"""
        Sets or updates the given facts for a host in the non-persistent fact cache.
"""
if not isinstance(facts, Mapping):
raise AnsibleAssertionError("the type of 'facts' to set for nonpersistent_facts should be a Mapping but is a %s" % type(facts))
try:
self._nonpersistent_fact_cache[host] |= facts
except KeyError:
self._nonpersistent_fact_cache[host] = facts
def set_host_variable(self, host, varname, value):
"""
Sets a value in the vars_cache for a host.
"""
if host not in self._vars_cache:
self._vars_cache[host] = dict()
if varname in self._vars_cache[host] and isinstance(self._vars_cache[host][varname], MutableMapping) and isinstance(value, MutableMapping):
self._vars_cache[host] = combine_vars(self._vars_cache[host], {varname: value})
else:
self._vars_cache[host][varname] = value
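    # Illustrative behaviour (not part of the original module): when both the
    # stored and the new value are mappings, combine_vars() is used, which only
    # merges recursively under ANSIBLE_HASH_BEHAVIOUR=merge; with the default
    # 'replace' behaviour, and for scalars, the new value simply wins:
    #
    #   vm.set_host_variable(host, 'port', 22)
    #   vm.set_host_variable(host, 'port', 2222)   # vars_cache now holds 2222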
class VarsWithSources(MutableMapping):
"""
Dict-like class for vars that also provides source information for each var
This class can only store the source for top-level vars. It does no tracking
on its own, just shows a debug message with the information that it is provided
when a particular var is accessed.
"""
def __init__(self, *args, **kwargs):
""" Dict-compatible constructor """
self.data = dict(*args, **kwargs)
self.sources = {}
@classmethod
def new_vars_with_sources(cls, data, sources):
""" Alternate constructor method to instantiate class with sources """
v = cls(data)
v.sources = sources
return v
def get_source(self, key):
return self.sources.get(key, None)
def __getitem__(self, key):
val = self.data[key]
# See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
display.debug("variable '%s' from source: %s" % (key, self.sources.get(key, "unknown")))
return val
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
# Prevent duplicate debug messages by defining our own __contains__ pointing at the underlying dict
def __contains__(self, key):
return self.data.__contains__(key)
def copy(self):
return VarsWithSources.new_vars_with_sources(self.data.copy(), self.sources.copy())
def __or__(self, other):
if isinstance(other, MutableMapping):
c = self.data.copy()
c.update(other)
return c
return NotImplemented
def __ror__(self, other):
if isinstance(other, MutableMapping):
c = self.__class__()
c.update(other)
c.update(self.data)
return c
return NotImplemented
def __ior__(self, other):
self.data.update(other)
return self.data
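# Illustrative sketch (not part of the original module): with DEFAULT_DEBUG
# enabled, get_vars() returns a VarsWithSources wrapper so every lookup can
# report where a variable came from:
#
#   v = VarsWithSources.new_vars_with_sources({'x': 1}, {'x': 'play vars'})
#   v['x']              # -> 1, debug-logs "variable 'x' from source: play vars"
#   v.get_source('x')   # -> 'play vars'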
size: 30,972 | language: Python | extension: .py | total_lines: 559 | avg_line_length: 42.062612 | max_line_length: 156 | alphanum_fraction: 0.594311 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,049 | file_name: clean.py | file_path: ansible_ansible/lib/ansible/vars/clean.py
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import re
from collections.abc import MutableMapping, MutableSequence
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils import six
from ansible.plugins.loader import connection_loader
from ansible.utils.display import Display
display = Display()
def module_response_deepcopy(v):
"""Function to create a deep copy of module response data
Designed to be used within the Ansible "engine" to improve performance
issues where ``copy.deepcopy`` was used previously, largely with CPU
and memory contention.
This only supports the following data types, and was designed to only
handle specific workloads:
* ``dict``
* ``list``
The data we pass here will come from a serialization such
as JSON, so we shouldn't have need for other data types such as
``set`` or ``tuple``.
Take note that this function should not be used extensively as a
replacement for ``deepcopy`` due to the naive way in which this
handles other data types.
Do not expect uses outside of those listed below to maintain
backwards compatibility, in case we need to extend this function
to handle our specific needs:
* ``ansible.executor.task_result.TaskResult.clean_copy``
* ``ansible.vars.clean.clean_facts``
* ``ansible.vars.namespace_facts``
"""
if isinstance(v, dict):
ret = v.copy()
items = six.iteritems(ret)
elif isinstance(v, list):
ret = v[:]
items = enumerate(ret)
else:
return v
for key, value in items:
if isinstance(value, (dict, list)):
ret[key] = module_response_deepcopy(value)
else:
ret[key] = value
return ret
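# Illustrative example (not part of the original module): only dicts and lists
# are copied recursively; any other value is returned as-is, i.e. shared:
#
#   src = {'a': [1, {'b': 2}], 'c': ('shared', 'tuple')}
#   dst = module_response_deepcopy(src)
#   dst['a'] is src['a']   # -> False (list was copied)
#   dst['c'] is src['c']   # -> True  (tuples are not handled, reference kept)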
def strip_internal_keys(dirty, exceptions=None):
# All keys starting with _ansible_ are internal, so change the 'dirty' mapping and remove them.
if exceptions is None:
exceptions = tuple()
if isinstance(dirty, MutableSequence):
for element in dirty:
if isinstance(element, (MutableMapping, MutableSequence)):
strip_internal_keys(element, exceptions=exceptions)
elif isinstance(dirty, MutableMapping):
# listify to avoid updating dict while iterating over it
for k in list(dirty.keys()):
if isinstance(k, six.string_types):
if k.startswith('_ansible_') and k not in exceptions:
del dirty[k]
continue
if isinstance(dirty[k], (MutableMapping, MutableSequence)):
strip_internal_keys(dirty[k], exceptions=exceptions)
else:
raise AnsibleError("Cannot strip invalid keys from %s" % type(dirty))
return dirty
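# Illustrative example (not part of the original module): all _ansible_* keys
# are stripped in place, recursing into nested mappings and sequences:
#
#   strip_internal_keys({'_ansible_no_log': False, 'rc': 0, 'results': [{'_ansible_item_label': 'x'}]})
#   # -> {'rc': 0, 'results': [{}]}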
def remove_internal_keys(data):
"""
More nuanced version of strip_internal_keys
"""
for key in list(data.keys()):
if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in C.INTERNAL_RESULT_KEYS:
display.warning("Removed unexpected internal key in module return: %s = %s" % (key, data[key]))
del data[key]
# remove bad/empty internal keys
for key in ['warnings', 'deprecations']:
if key in data and not data[key]:
del data[key]
# cleanse fact values that are allowed from actions but not modules
for key in list(data.get('ansible_facts', {}).keys()):
if key.startswith('discovered_interpreter_') or key.startswith('ansible_discovered_interpreter_'):
del data['ansible_facts'][key]
def clean_facts(facts):
    """ remove facts that can override internal keys or are otherwise deemed unsafe """
data = module_response_deepcopy(facts)
remove_keys = set()
fact_keys = set(data.keys())
# first we add all of our magic variable names to the set of
# keys we want to remove from facts
# NOTE: these will eventually disappear in favor of others below
for magic_var in C.MAGIC_VARIABLE_MAPPING:
remove_keys.update(fact_keys.intersection(C.MAGIC_VARIABLE_MAPPING[magic_var]))
# remove common connection vars
remove_keys.update(fact_keys.intersection(C.COMMON_CONNECTION_VARS))
# next we remove any connection plugin specific vars
for conn_path in connection_loader.all(path_only=True):
conn_name = os.path.splitext(os.path.basename(conn_path))[0]
re_key = re.compile('^ansible_%s_' % re.escape(conn_name))
for fact_key in fact_keys:
            # most lightweight VM or container tech creates devices with this pattern; this avoids filtering them out
if (re_key.match(fact_key) and not fact_key.endswith(('_bridge', '_gwbridge'))) or fact_key.startswith('ansible_become_'):
remove_keys.add(fact_key)
# remove some KNOWN keys
for hard in C.RESTRICTED_RESULT_KEYS + C.INTERNAL_RESULT_KEYS:
if hard in fact_keys:
remove_keys.add(hard)
# finally, we search for interpreter keys to remove
re_interp = re.compile('^ansible_.*_interpreter$')
for fact_key in fact_keys:
if re_interp.match(fact_key):
remove_keys.add(fact_key)
# then we remove them (except for ssh host keys)
for r_key in remove_keys:
if not r_key.startswith('ansible_ssh_host_key_'):
display.warning("Removed restricted key from module data: %s" % (r_key))
del data[r_key]
return strip_internal_keys(data)
def namespace_facts(facts):
""" return all facts inside 'ansible_facts' w/o an ansible_ prefix """
deprefixed = {}
for k in facts:
if k.startswith('ansible_') and k not in ('ansible_local',):
deprefixed[k[8:]] = module_response_deepcopy(facts[k])
else:
deprefixed[k] = module_response_deepcopy(facts[k])
return {'ansible_facts': deprefixed}
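# Illustrative example (not part of the original module): facts are re-homed
# under 'ansible_facts' with their ansible_ prefix stripped, except for
# ansible_local, which keeps its name:
#
#   namespace_facts({'ansible_os_family': 'Debian', 'ansible_local': {}})
#   # -> {'ansible_facts': {'os_family': 'Debian', 'ansible_local': {}}}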
size: 5,999 | language: Python | extension: .py | total_lines: 128 | avg_line_length: 39.648438 | max_line_length: 134 | alphanum_fraction: 0.677242 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,050 | file_name: reserved.py | file_path: ansible_ansible/lib/ansible/vars/reserved.py
# (c) 2017 Ansible By Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
def get_reserved_names(include_private=True):
    """ this function returns the set of reserved names associated with play objects """
public = set()
private = set()
result = set()
# FIXME: find a way to 'not hardcode', possibly need role deps/includes
class_list = [Play, Role, Block, Task]
for aclass in class_list:
# build ordered list to loop over and dict with attributes
for name, attr in aclass.fattributes.items():
if attr.private:
private.add(name)
else:
public.add(name)
# local_action is implicit with action
if 'action' in public:
public.add('local_action')
# loop implies with_
# FIXME: remove after with_ is not only deprecated but removed
if 'loop' in private or 'loop' in public:
public.add('with_')
if include_private:
result = public.union(private)
else:
result = public
return result
def warn_if_reserved(myvars, additional=None):
""" this function warns if any variable passed conflicts with internally reserved names """
if additional is None:
reserved = _RESERVED_NAMES
else:
reserved = _RESERVED_NAMES.union(additional)
varnames = set(myvars)
varnames.discard('vars') # we add this one internally, so safe to ignore
for varname in varnames.intersection(reserved):
display.warning('Found variable using reserved name: %s' % varname)
def is_reserved_name(name):
return name in _RESERVED_NAMES
_RESERVED_NAMES = frozenset(get_reserved_names())
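# Illustrative usage (not part of the original module):
#
#   is_reserved_name('tasks')                        # -> True (a Play attribute)
#   is_reserved_name('my_custom_var')                # -> False
#   warn_if_reserved({'environment': 1, 'x': 2})     # warns about 'environment'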
size: 2,531 | language: Python | extension: .py | total_lines: 62 | avg_line_length: 36.016129 | max_line_length: 95 | alphanum_fraction: 0.717844 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,051 | file_name: role.py | file_path: ansible_ansible/lib/ansible/galaxy/role.py
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import annotations
import errno
import datetime
import functools
import os
import tarfile
import tempfile
from collections.abc import MutableSequence
from shutil import rmtree
from ansible import context
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
from ansible.utils.path import is_subpath, unfrackpath
display = Display()
@functools.cache
def _check_working_data_filter() -> bool:
"""
    Check whether the tarfile.data_filter implementation works on the current Python version.
    """
    # The code below circumvents a broken implementation of data_filter in tarfile.
    # For more information, see https://github.com/python/cpython/issues/107845
# deprecated: description='probing broken data filter implementation' python_version='3.11'
ret = False
if hasattr(tarfile, 'data_filter'):
# We explicitly check if tarfile.data_filter is broken or not
ti = tarfile.TarInfo('docs/README.md')
ti.type = tarfile.SYMTYPE
ti.linkname = '../README.md'
try:
tarfile.data_filter(ti, '/foo')
except tarfile.LinkOutsideDestinationError:
pass
else:
ret = True
return ret
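# Illustrative note (not part of the original module): the probe above hands
# data_filter a legitimate relative symlink (docs/README.md -> ../README.md,
# which resolves inside the destination); a broken implementation wrongly
# raises LinkOutsideDestinationError, in which case extraction falls back to
# the manual path checks performed in GalaxyRole.install().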
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml'))
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
META_REQUIREMENTS = (os.path.join('meta', 'requirements.yml'), os.path.join('meta', 'requirements.yaml'))
ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
def __init__(self, galaxy, api, name, src=None, version=None, scm=None, path=None):
self._metadata = None
self._metadata_dependencies = None
self._requirements = None
self._install_info = None
self._validate_certs = not context.CLIARGS['ignore_certs']
display.debug('Validate TLS certificates: %s' % self._validate_certs)
self.galaxy = galaxy
self._api = api
self.name = name
self.version = version
self.src = src or name
self.download_url = None
self.scm = scm
self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths]
if path is not None:
if not path.endswith(os.path.join(os.path.sep, self.name)):
path = os.path.join(path, self.name)
else:
# Look for a meta/main.ya?ml inside the potential role dir in case
# the role name is the same as parent directory of the role.
#
# Example:
# ./roles/testing/testing/meta/main.yml
for meta_main in self.META_MAIN:
if os.path.exists(os.path.join(path, name, meta_main)):
path = os.path.join(path, self.name)
break
self.path = path
else:
# use the first path by default
self.path = self.paths[0]
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
def __eq__(self, other):
return self.name == other.name
@property
def api(self):
if not isinstance(self._api, GalaxyAPI):
return self._api.api
return self._api
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
for path in self.paths:
for meta_main in self.META_MAIN:
meta_path = os.path.join(path, meta_main)
if os.path.isfile(meta_path):
try:
with open(meta_path, 'r') as f:
self._metadata = yaml_load(f)
except Exception:
display.vvvvv("Unable to load metadata for %s" % self.name)
return False
break
return self._metadata
@property
def metadata_dependencies(self):
"""
Returns a list of dependencies from role metadata
"""
if self._metadata_dependencies is None:
self._metadata_dependencies = []
if self.metadata is not None:
self._metadata_dependencies = self.metadata.get('dependencies') or []
if not isinstance(self._metadata_dependencies, MutableSequence):
raise AnsibleParserError(
f"Expected role dependencies to be a list. Role {self} has meta/main.yml with dependencies {self._metadata_dependencies}"
)
return self._metadata_dependencies
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
with open(info_path, 'r') as f:
self._install_info = yaml_load(f)
except Exception:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
return self._install_info
@property
def _exists(self):
for path in self.paths:
if os.path.isdir(path):
return True
return False
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.now(datetime.timezone.utc).strftime("%c"),
)
if not os.path.exists(os.path.join(self.path, 'meta')):
os.makedirs(os.path.join(self.path, 'meta'))
info_path = os.path.join(self.path, self.META_INSTALL)
with open(info_path, 'w+') as f:
try:
self._install_info = yaml_dump(info, f)
except Exception:
return False
return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except Exception:
pass
return False
def fetch(self, role_data):
"""
Downloads the archived role to a temp location based on role data
"""
if role_data:
# first grab the file and save it to a temp location
if self.download_url is not None:
archive_url = self.download_url
elif "github_user" in role_data and "github_repo" in role_data:
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
display.display("- downloading role from %s" % archive_url)
try:
url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent(), timeout=60)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
temp_file.write(data)
data = url_file.read()
temp_file.close()
return temp_file.name
except Exception as e:
display.error(u"failed to download the file: %s" % to_text(e))
return False
def install(self):
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec)
elif self.src:
if os.path.isfile(self.src):
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
role_data = self.api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.api.api_server))
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role, and should only be installed using Ansible "
"Container" % self.name)
role_versions = self.api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [v for a in role_versions if (v := LooseVersion()) and v.parse(a.get('name') or '') is None]
try:
loose_versions.sort()
except TypeError:
raise AnsibleError(
'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
'install.' % ', '.join([v.vstring for v in loose_versions])
)
self.version = to_text(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
self.name,
role_versions))
# check if there's a source link/url for our role_version
for role_version in role_versions:
if role_version['name'] == self.version and 'source' in role_version:
self.src = role_version['source']
if role_version['name'] == self.version and 'download_url' in role_version:
self.download_url = role_version['download_url']
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the downloaded file does not appear to be a valid tar archive.")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
for meta_main in self.META_MAIN:
if meta_main in member.name:
# Look for parent of meta/main.yml
# Due to possibility of sub roles each containing meta/main.yml
# look for shortest length parent
meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
if not meta_file:
archive_parent_dir = meta_parent_dir
meta_file = member
else:
if len(meta_parent_dir) < len(archive_parent_dir):
archive_parent_dir = meta_parent_dir
meta_file = member
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml_load(role_tar_file.extractfile(meta_file))
except Exception:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
paths = self.paths
if self.path != paths[0]:
                # path can be passed through __init__
                # FIXME should this be done in __init__?
                paths[:0] = [self.path]
paths_len = len(paths)
for idx, path in enumerate(paths):
self.path = path
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not context.CLIARGS.get("force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
"want to put the role here." % self.path)
else:
os.makedirs(self.path)
resolved_archive = unfrackpath(archive_parent_dir, follow=False)
# We strip off any higher-level directories for all of the files
# contained within the tar file here. The default is 'github_repo-target'.
                    # Gerrit instances, on the other hand, do not have a parent directory at all.
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop any containing directory, as mentioned above
if not (member.isreg() or member.issym()):
continue
for attr in ('name', 'linkname'):
if not (attr_value := getattr(member, attr, None)):
continue
if attr == 'linkname':
# Symlinks are relative to the link
relative_to = os.path.dirname(getattr(member, 'name', ''))
else:
# Normalize paths that start with the archive dir
attr_value = attr_value.replace(archive_parent_dir, "", 1)
attr_value = os.path.join(*attr_value.split(os.sep)) # remove leading os.sep
relative_to = ''
full_path = os.path.join(resolved_archive, relative_to, attr_value)
if not is_subpath(full_path, resolved_archive, real=True):
err = f"Invalid {attr} for tarfile member: path {full_path} is not a subpath of the role {resolved_archive}"
raise AnsibleError(err)
relative_path_dir = os.path.join(resolved_archive, relative_to)
relative_path = os.path.join(*full_path.replace(relative_path_dir, "", 1).split(os.sep))
setattr(member, attr, relative_path)
if _check_working_data_filter():
# deprecated: description='extract fallback without filter' python_version='3.11'
role_tar_file.extract(member, to_native(self.path), filter='data') # type: ignore[call-arg]
else:
# Remove along with manual path filter once Python 3.12 is minimum supported version
role_tar_file.extract(member, to_native(self.path))
# write out the install info file for later use
self._write_galaxy_install_info()
break
except OSError as e:
if e.errno == errno.EACCES and idx < paths_len - 1:
continue
raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e)))
# return the parsed yaml metadata
display.display("- %s was installed successfully" % str(self))
if not (self.src and os.path.isfile(self.src)):
try:
os.unlink(tmp_file)
except (OSError, IOError) as e:
display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e)))
return True
return False
@property
def spec(self):
"""
Returns role spec info
{
'scm': 'git',
'src': 'http://git.example.com/repos/repo.git',
'version': 'v1.0',
'name': 'repo'
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
@property
def requirements(self):
"""
Returns role requirements
"""
if self._requirements is None:
self._requirements = []
for meta_requirements in self.META_REQUIREMENTS:
meta_path = os.path.join(self.path, meta_requirements)
if os.path.isfile(meta_path):
try:
with open(meta_path, 'r') as f:
self._requirements = yaml_load(f)
except Exception:
display.vvvvv("Unable to load requirements for %s" % self.name)
break
if not isinstance(self._requirements, MutableSequence):
raise AnsibleParserError(f"Expected role dependencies to be a list. Role {self} has meta/requirements.yml {self._requirements}")
return self._requirements
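# Illustrative sketch (not part of the original module), assuming hypothetical,
# already configured `galaxy` context and `api` client objects:
#
#   role = GalaxyRole(galaxy, api, 'namespace.rolename', version='1.0.0')
#   if not role.install_info:
#       role.install()                    # fetch the archive and extract it
#   print(role.metadata_dependencies)     # dependencies from meta/main.yml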
size: 21,036 | language: Python | extension: .py | total_lines: 414 | avg_line_length: 34.642512 | max_line_length: 152 | alphanum_fraction: 0.529166 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,052 | file_name: token.py | file_path: ansible_ansible/lib/ansible/galaxy/token.py
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import annotations
import base64
import json
import os
import time
from stat import S_IRUSR, S_IWUSR
from urllib.error import HTTPError
from ansible import constants as C
from ansible.galaxy.api import GalaxyError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.common.sentinel import Sentinel as NoTokenSentinel
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
display = Display()
class KeycloakToken(object):
"""A token granted by a Keycloak server.
    Like sso.redhat.com as used by cloud.redhat.com,
    i.e. Automation Hub."""
token_type = 'Bearer'
def __init__(self, access_token=None, auth_url=None, validate_certs=True, client_id=None):
self.access_token = access_token
self.auth_url = auth_url
self._token = None
self.validate_certs = validate_certs
self.client_id = client_id
if self.client_id is None:
self.client_id = 'cloud-services'
self._expiration = None
def _form_payload(self):
return 'grant_type=refresh_token&client_id=%s&refresh_token=%s' % (self.client_id,
self.access_token)
def get(self):
if self._expiration and time.time() >= self._expiration:
self._token = None
if self._token:
return self._token
# - build a request to POST to auth_url
# - body is form encoded
# - 'refresh_token' is the offline token stored in ansible.cfg
# - 'grant_type' is 'refresh_token'
# - 'client_id' is 'cloud-services'
# - should probably be based on the contents of the
# offline_ticket's JWT payload 'aud' (audience)
# or 'azp' (Authorized party - the party to which the ID Token was issued)
payload = self._form_payload()
try:
resp = open_url(to_native(self.auth_url),
data=payload,
validate_certs=self.validate_certs,
method='POST',
http_agent=user_agent())
except HTTPError as e:
raise GalaxyError(e, 'Unable to get access token')
data = json.load(resp)
# So that we have a buffer, expire the token in ~2/3 the given value
expires_in = data['expires_in'] // 3 * 2
self._expiration = time.time() + expires_in
# - extract 'access_token'
self._token = data.get('access_token')
return self._token
def headers(self):
headers = {}
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
class GalaxyToken(object):
    """ Class for storing and retrieving the local Galaxy token """
token_type = 'Token'
def __init__(self, token=None):
self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
# Done so the config file is only opened when set/get/save is called
self._config = None
self._token = token
@property
def config(self):
if self._config is None:
self._config = self._read()
# Prioritise the token passed into the constructor
if self._token:
self._config['token'] = None if self._token is NoTokenSentinel else self._token
return self._config
def _read(self):
action = 'Opened'
if not os.path.isfile(self.b_file):
# token file not found, create and chmod u+rw
open(self.b_file, 'w').close()
os.chmod(self.b_file, S_IRUSR | S_IWUSR) # owner has +rw
action = 'Created'
with open(self.b_file, 'r') as f:
config = yaml_load(f)
display.vvv('%s %s' % (action, to_text(self.b_file)))
if config and not isinstance(config, dict):
display.vvv('Galaxy token file %s malformed, unable to read it' % to_text(self.b_file))
return {}
return config or {}
def set(self, token):
self._token = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.b_file, 'w') as f:
yaml_dump(self.config, f, default_flow_style=False)
def headers(self):
headers = {}
token = self.get()
if token:
            headers['Authorization'] = '%s %s' % (self.token_type, token)
return headers
class BasicAuthToken(object):
token_type = 'Basic'
def __init__(self, username, password=None):
self.username = username
self.password = password
self._token = None
@staticmethod
def _encode_token(username, password):
token = "%s:%s" % (to_text(username, errors='surrogate_or_strict'),
to_text(password, errors='surrogate_or_strict', nonstring='passthru') or '')
b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict'))
return to_text(b64_val)
def get(self):
if self._token:
return self._token
self._token = self._encode_token(self.username, self.password)
return self._token
def headers(self):
headers = {}
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
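# Illustrative usage (not part of the original module): all three token classes
# expose the same headers() interface consumed by the Galaxy API client. Note
# that GalaxyToken reads (and may create) the token file on disk:
#
#   BasicAuthToken('user', 'pass').headers()
#   # -> {'Authorization': 'Basic dXNlcjpwYXNz'}
#   GalaxyToken(token='abc123').headers()
#   # -> {'Authorization': 'Token abc123'}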
size: 6,447 | language: Python | extension: .py | total_lines: 151 | avg_line_length: 34.569536 | max_line_length: 103 | alphanum_fraction: 0.610489 | repo_name: ansible/ansible | repo_stars: 62,258 | repo_forks: 23,791 | repo_open_issues: 861 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,053 | file_name: api.py | file_path: ansible_ansible/lib/ansible/galaxy/api.py
# (C) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import collections
import datetime
import functools
import hashlib
import json
import os
import stat
import tarfile
import time
import threading
from http import HTTPStatus
from http.client import BadStatusLine, IncompleteRead
from urllib.error import HTTPError, URLError
from urllib.parse import quote as urlquote, urlencode, urlparse, parse_qs, urljoin
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.api import retry_with_delays_and_condition
from ansible.module_utils.api import generate_jittered_backoff
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url, prepare_multipart
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash_s
from ansible.utils.path import makedirs_safe
display = Display()
_CACHE_LOCK = threading.Lock()
COLLECTION_PAGE_SIZE = 100
RETRY_HTTP_ERROR_CODES = { # TODO: Allow user-configuration
HTTPStatus.TOO_MANY_REQUESTS,
520, # Galaxy rate limit error code (Cloudflare unknown error)
HTTPStatus.BAD_GATEWAY, # Common error from galaxy that may represent any number of transient backend issues
}
def cache_lock(func):
def wrapped(*args, **kwargs):
with _CACHE_LOCK:
return func(*args, **kwargs)
return wrapped
def should_retry_error(exception):
# Note: cloud.redhat.com masks rate limit errors with 403 (Forbidden) error codes.
# Since 403 could reflect the actual problem (such as an expired token), we should
# not retry by default.
if isinstance(exception, GalaxyError) and exception.http_code in RETRY_HTTP_ERROR_CODES:
return True
if isinstance(exception, AnsibleError) and (orig_exc := getattr(exception, 'orig_exc', None)):
# URLError is often a proxy for an underlying error, handle wrapped exceptions
if isinstance(orig_exc, URLError):
orig_exc = orig_exc.reason
# Handle common URL related errors
if isinstance(orig_exc, (TimeoutError, BadStatusLine, IncompleteRead)):
return True
return False
def g_connect(versions):
"""
Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the
endpoint.
:param versions: A list of API versions that the function supports.
"""
def decorator(method):
def wrapped(self, *args, **kwargs):
if not self._available_api_versions:
display.vvvv("Initial connection to galaxy_server: %s" % self.api_server)
# Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer
# auth for Automation Hub.
n_url = self.api_server
error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url)
if self.api_server == 'https://galaxy.ansible.com' or self.api_server == 'https://galaxy.ansible.com/':
n_url = 'https://galaxy.ansible.com/api/'
try:
data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
                    # Either the URL doesn't exist, or other error. Or the URL exists, but isn't a galaxy API
# root (not JSON, no 'available_versions') so try appending '/api/'
if n_url.endswith('/api') or n_url.endswith('/api/'):
raise
# Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found).
n_url = _urljoin(n_url, '/api/')
try:
data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except GalaxyError as new_err:
if new_err.http_code == 404:
raise err
raise
if 'available_versions' not in data:
raise AnsibleError("Tried to find galaxy API root at %s but no 'available_versions' are available "
"on %s" % (n_url, self.api_server))
# Update api_server to point to the "real" API root, which in this case could have been the configured
# url + '/api/' appended.
self.api_server = n_url
# Default to only supporting v1, if only v1 is returned we also assume that v2 is available even though
# it isn't returned in the available_versions dict.
available_versions = data.get('available_versions', {u'v1': u'v1/'})
if list(available_versions.keys()) == [u'v1']:
available_versions[u'v2'] = u'v2/'
self._available_api_versions = available_versions
display.vvvv("Found API version '%s' with Galaxy server %s (%s)"
% (', '.join(available_versions.keys()), self.name, self.api_server))
# Verify that the API versions the function works with are available on the server specified.
available_versions = set(self._available_api_versions.keys())
common_versions = set(versions).intersection(available_versions)
if not common_versions:
raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s"
% (method.__name__, ", ".join(versions), ", ".join(available_versions),
self.name, self.api_server))
# Warn only when we know we are talking to a collections API
if common_versions == {'v2'}:
display.deprecated(
'The v2 Ansible Galaxy API is deprecated and no longer supported. '
'Ensure that you have configured the ansible-galaxy CLI to utilize an '
'updated and supported version of Ansible Galaxy.',
version='2.20'
)
return method(self, *args, **kwargs)
return wrapped
return decorator
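# Typical usage, as seen on the GalaxyAPI methods later in this module: declare
# the API versions a method requires and let g_connect lazily initialize and
# validate the connection on first use, e.g.:
#
#   @g_connect(['v1'])
#   def authenticate(self, github_token): ...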
def get_cache_id(url):
""" Gets the cache ID for the URL specified. """
url_info = urlparse(url)
port = None
try:
port = url_info.port
except ValueError:
pass # While the URL is probably invalid, let the caller figure that out when using it
# Cannot use netloc because it could contain credentials if the server specified had them in there.
return '%s:%s' % (url_info.hostname, port or '')
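# Illustrative examples (hypothetical URLs); credentials and paths never become
# part of the cache ID:
#   get_cache_id('https://user:pass@galaxy.example.com:8443/api/')  -> 'galaxy.example.com:8443'
#   get_cache_id('https://galaxy.ansible.com/api/')                 -> 'galaxy.ansible.com:'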
@cache_lock
def _load_cache(b_cache_path):
""" Loads the cache file requested if possible. The file must not be world writable. """
cache_version = 1
if not os.path.isfile(b_cache_path):
display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path))
with open(b_cache_path, 'w'):
os.chmod(b_cache_path, 0o600)
cache_mode = os.stat(b_cache_path).st_mode
if cache_mode & stat.S_IWOTH:
display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source."
% to_text(b_cache_path))
return
with open(b_cache_path, mode='rb') as fd:
json_val = to_text(fd.read(), errors='surrogate_or_strict')
try:
cache = json.loads(json_val)
except ValueError:
cache = None
if not isinstance(cache, dict) or cache.get('version', None) != cache_version:
display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path))
cache = {'version': cache_version}
# Set the cache after we've cleared the existing entries
with open(b_cache_path, mode='wb') as fd:
fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict'))
return cache
def _urljoin(*args):
return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a)
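# Illustrative examples: surrounding slashes are stripped from each segment and
# empty segments are dropped, so no trailing slash is produced (callers append
# one where an API requires it):
#   _urljoin('https://galaxy.ansible.com/', '/api/', 'v2')  -> 'https://galaxy.ansible.com/api/v2'
#   _urljoin('https://host/api', '', 'roles')               -> 'https://host/api/roles'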
class GalaxyError(AnsibleError):
""" Error for bad Galaxy server responses. """
def __init__(self, http_error, message):
super(GalaxyError, self).__init__(message)
self.http_code = http_error.code
self.url = http_error.geturl()
try:
http_msg = to_text(http_error.read())
err_info = json.loads(http_msg)
except (AttributeError, ValueError):
err_info = {}
url_split = self.url.split('/')
if 'v2' in url_split:
galaxy_msg = err_info.get('message', http_error.reason)
code = err_info.get('code', 'Unknown')
full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code)
elif 'v3' in url_split:
errors = err_info.get('errors', [])
if not errors:
errors = [{}] # Defaults are set below, we just need to make sure 1 error is present.
message_lines = []
for error in errors:
error_msg = error.get('detail') or error.get('title') or http_error.reason
error_code = error.get('code') or 'Unknown'
message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code)
message_lines.append(message_line)
full_error_msg = "%s %s" % (message, ', '.join(message_lines))
else:
# v1 and unknown API endpoints
galaxy_msg = err_info.get('default', http_error.reason)
full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg)
self.message = to_native(full_error_msg)
# Keep the raw string results for the date. It's too complex to parse as a datetime object and the various APIs return
# them in different formats.
CollectionMetadata = collections.namedtuple('CollectionMetadata', ['namespace', 'name', 'created_str', 'modified_str'])
class CollectionVersionMetadata:
def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies, signatures_url, signatures):
"""
Contains common information about a collection on a Galaxy server to smooth through API differences for
Collection and define a standard meta info for a collection.
:param namespace: The namespace name.
:param name: The collection name.
:param version: The version that the metadata refers to.
:param download_url: The URL to download the collection.
:param artifact_sha256: The SHA256 of the collection artifact for later verification.
:param dependencies: A dict of dependencies of the collection.
:param signatures_url: The URL to the specific version of the collection.
:param signatures: The list of signatures found at the signatures_url.
"""
self.namespace = namespace
self.name = name
self.version = version
self.download_url = download_url
self.artifact_sha256 = artifact_sha256
self.dependencies = dependencies
self.signatures_url = signatures_url
self.signatures = signatures
@functools.total_ordering
class GalaxyAPI:
""" This class is meant to be used as a API client for an Ansible Galaxy server """
def __init__(
self, galaxy, name, url,
username=None, password=None, token=None, validate_certs=True,
available_api_versions=None,
clear_response_cache=False, no_cache=True,
priority=float('inf'),
timeout=60,
):
self.galaxy = galaxy
self.name = name
self.username = username
self.password = password
self.token = token
self.api_server = url
self.validate_certs = validate_certs
self.timeout = timeout
self._available_api_versions = available_api_versions or {}
self._priority = priority
self._server_timeout = timeout
b_cache_dir = to_bytes(C.GALAXY_CACHE_DIR, errors='surrogate_or_strict')
makedirs_safe(b_cache_dir, mode=0o700)
self._b_cache_path = os.path.join(b_cache_dir, b'api.json')
if clear_response_cache:
with _CACHE_LOCK:
if os.path.exists(self._b_cache_path):
display.vvvv("Clearing cache file (%s)" % to_text(self._b_cache_path))
os.remove(self._b_cache_path)
self._cache = None
if not no_cache:
self._cache = _load_cache(self._b_cache_path)
display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs))
def __str__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as a native string representation."""
return to_native(self.name)
def __unicode__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as a unicode/text string representation."""
return to_text(self.name)
def __repr__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as an inspectable string representation."""
return (
'<{instance!s} "{name!s}" @ {url!s} with priority {priority!s}>'.
format(
instance=self, name=self.name,
priority=self._priority, url=self.api_server,
)
)
def __lt__(self, other_galaxy_api):
# type: (GalaxyAPI, GalaxyAPI) -> bool
"""Return whether the instance priority is higher than other."""
if not isinstance(other_galaxy_api, self.__class__):
return NotImplemented
return (
self._priority > other_galaxy_api._priority or
            self.name < other_galaxy_api.name
)
@property # type: ignore[misc] # https://github.com/python/mypy/issues/1362
@g_connect(['v1', 'v2', 'v3'])
def available_api_versions(self):
# Calling g_connect will populate self._available_api_versions
return self._available_api_versions
@retry_with_delays_and_condition(
backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40),
should_retry_error=should_retry_error
)
def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None,
cache=False, cache_key=None):
url_info = urlparse(url)
cache_id = get_cache_id(url)
if not cache_key:
cache_key = url_info.path
query = parse_qs(url_info.query)
if cache and self._cache:
server_cache = self._cache.setdefault(cache_id, {})
iso_datetime_format = '%Y-%m-%dT%H:%M:%SZ'
valid = False
if cache_key in server_cache:
expires = datetime.datetime.strptime(server_cache[cache_key]['expires'], iso_datetime_format)
expires = expires.replace(tzinfo=datetime.timezone.utc)
valid = datetime.datetime.now(datetime.timezone.utc) < expires
is_paginated_url = 'page' in query or 'offset' in query
if valid and not is_paginated_url:
# Got a hit on the cache and we aren't getting a paginated response
path_cache = server_cache[cache_key]
if path_cache.get('paginated'):
if '/v3/' in cache_key:
res = {'links': {'next': None}}
else:
res = {'next': None}
# Technically some v3 paginated APIs return in 'data' but the caller checks the keys for this so
# always returning the cache under results is fine.
res['results'] = []
for result in path_cache['results']:
res['results'].append(result)
else:
res = path_cache['results']
return res
elif not is_paginated_url:
# The cache entry had expired or does not exist, start a new blank entry to be filled later.
expires = datetime.datetime.now(datetime.timezone.utc)
expires += datetime.timedelta(days=1)
server_cache[cache_key] = {
'expires': expires.strftime(iso_datetime_format),
'paginated': False,
}
headers = headers or {}
self._add_auth_token(headers, url, required=auth_required)
try:
display.vvvv("Calling Galaxy at %s" % url)
resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers,
method=method, timeout=self._server_timeout, http_agent=user_agent(), follow_redirects='safe')
except HTTPError as e:
raise GalaxyError(e, error_context_msg)
except Exception as e:
raise AnsibleError(
"Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e)),
orig_exc=e
)
resp_data = to_text(resp.read(), errors='surrogate_or_strict')
try:
data = json.loads(resp_data)
except ValueError:
raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s"
% (resp.url, to_native(resp_data)))
if cache and self._cache:
path_cache = self._cache[cache_id][cache_key]
# v3 can return data or results for paginated results. Scan the result so we can determine what to cache.
paginated_key = None
for key in ['data', 'results']:
if key in data:
paginated_key = key
break
if paginated_key:
path_cache['paginated'] = True
results = path_cache.setdefault('results', [])
for result in data[paginated_key]:
results.append(result)
else:
path_cache['results'] = data
return data
def _add_auth_token(self, headers, url, token_type=None, required=False):
# Don't add the auth token if one is already present
if 'Authorization' in headers:
return
if not self.token and required:
raise AnsibleError("No access token or username set. A token can be set with --api-key "
"or at {0}.".format(to_native(C.GALAXY_TOKEN_PATH)))
if self.token:
headers.update(self.token.headers())
@cache_lock
def _set_cache(self):
with open(self._b_cache_path, mode='wb') as fd:
fd.write(to_bytes(json.dumps(self._cache), errors='surrogate_or_strict'))
@g_connect(['v1'])
def authenticate(self, github_token):
"""
Retrieve an authentication token
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/'
args = urlencode({"github_token": github_token})
try:
resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent(), timeout=self._server_timeout)
except HTTPError as e:
raise GalaxyError(e, 'Attempting to authenticate to galaxy')
except Exception as e:
raise AnsibleError('Unable to authenticate to galaxy: %s' % to_native(e), orig_exc=e)
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
return data
@g_connect(['v1'])
def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
"""
Post an import request
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/'
args = {
"github_user": github_user,
"github_repo": github_repo,
"github_reference": reference if reference else ""
}
if role_name:
args['alternate_role_name'] = role_name
data = self._call_galaxy(url, args=urlencode(args), method="POST")
if data.get('results', None):
return data['results']
return data
@g_connect(['v1'])
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports")
if task_id is not None:
url = "%s?id=%d" % (url, task_id)
elif github_user is not None and github_repo is not None:
url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self._call_galaxy(url)
return data['results']
@g_connect(['v1'])
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
"""
role_name = to_text(urlquote(to_bytes(role_name)))
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except Exception:
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles",
"?owner__username=%s&name=%s" % (user_name, role_name))
data = self._call_galaxy(url)
if len(data["results"]) != 0:
return data["results"][0]
return None
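    # Illustrative example (hypothetical role): lookup_role_by_name('geerlingguy.apache')
    # queries ?owner__username=geerlingguy&name=apache and returns the first
    # matching result, or None when nothing matches.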
@g_connect(['v1'])
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
results = []
try:
url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related,
"?page_size=50")
data = self._call_galaxy(url)
results = data['results']
done = (data.get('next_link', None) is None)
# https://github.com/ansible/ansible/issues/64355
# api_server contains part of the API path but next_link includes the /api part so strip it out.
url_info = urlparse(self.api_server)
base_url = "%s://%s/" % (url_info.scheme, url_info.netloc)
while not done:
url = _urljoin(base_url, data['next_link'])
data = self._call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
except Exception as e:
display.warning("Unable to retrieve role (id=%s) data (%s), but this is not fatal so we continue: %s"
% (role_id, related, to_text(e)))
return results
@g_connect(['v1'])
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size")
data = self._call_galaxy(url)
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next_link', None) is None)
while not done:
url = _urljoin(self.api_server, data['next_link'])
data = self._call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error)))
@g_connect(['v1'])
def search_roles(self, search, **kwargs):
search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?")
if search:
search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search)))
tags = kwargs.get('tags', None)
platforms = kwargs.get('platforms', None)
page_size = kwargs.get('page_size', None)
author = kwargs.get('author', None)
if tags and isinstance(tags, string_types):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, string_types):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
if page_size:
search_url += '&page_size=%s' % page_size
if author:
search_url += '&username_autocomplete=%s' % author
data = self._call_galaxy(search_url)
return data
@g_connect(['v1'])
def add_secret(self, source, github_user, github_repo, secret):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/'
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self._call_galaxy(url, args=args, method="POST")
return data
@g_connect(['v1'])
def list_secrets(self):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets")
data = self._call_galaxy(url, auth_required=True)
return data
@g_connect(['v1'])
def remove_secret(self, secret_id):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/'
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
@g_connect(['v1'])
def delete_role(self, github_user, github_repo):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole",
"?github_user=%s&github_repo=%s" % (github_user, github_repo))
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
# Collection APIs #
@g_connect(['v2', 'v3'])
def publish_collection(self, collection_path):
"""
Publishes a collection to a Galaxy server and returns the import task URI.
:param collection_path: The path to the collection tarball to publish.
:return: The import task URI that contains the import results.
"""
display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server))
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
if not os.path.exists(b_collection_path):
raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path))
elif not tarfile.is_tarfile(b_collection_path):
raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection "
"build' to create a proper release artifact." % to_native(collection_path))
with open(b_collection_path, 'rb') as collection_tar:
sha256 = secure_hash_s(collection_tar.read(), hash_func=hashlib.sha256)
content_type, b_form_data = prepare_multipart(
{
'sha256': sha256,
'file': {
'filename': b_collection_path,
'mime_type': 'application/octet-stream',
},
}
)
headers = {
'Content-type': content_type,
'Content-length': len(b_form_data),
}
if 'v3' in self.available_api_versions:
n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/'
else:
n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/'
resp = self._call_galaxy(n_url, args=b_form_data, headers=headers, method='POST', auth_required=True,
error_context_msg='Error when publishing collection to %s (%s)'
% (self.name, self.api_server))
return resp['task']
@g_connect(['v2', 'v3'])
def wait_import_task(self, task_id, timeout=0):
"""
Waits until the import process on the Galaxy server has completed or the timeout is reached.
:param task_id: The id of the import task to wait for. This can be parsed out of the return
value for GalaxyAPI.publish_collection.
:param timeout: The timeout in seconds, 0 is no timeout.
"""
state = 'waiting'
data = None
# Construct the appropriate URL per version
if 'v3' in self.available_api_versions:
full_url = _urljoin(self.api_server, self.available_api_versions['v3'],
'imports/collections', task_id, '/')
else:
full_url = _urljoin(self.api_server, self.available_api_versions['v2'],
'collection-imports', task_id, '/')
display.display("Waiting until Galaxy import task %s has completed" % full_url)
start = time.time()
wait = C.GALAXY_COLLECTION_IMPORT_POLL_INTERVAL
while timeout == 0 or (time.time() - start) < timeout:
try:
data = self._call_galaxy(full_url, method='GET', auth_required=True,
error_context_msg='Error when getting import task results at %s' % full_url)
except GalaxyError as e:
if e.http_code != 404:
raise
# The import job may not have started, and as such, the task url may not yet exist
display.vvv('Galaxy import process has not started, wait %s seconds before trying again' % wait)
time.sleep(wait)
continue
state = data.get('state', 'waiting')
if data.get('finished_at', None):
break
display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again'
% (state, wait))
time.sleep(wait)
# poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds.
wait = min(30, wait * C.GALAXY_COLLECTION_IMPORT_POLL_FACTOR)
if state == 'waiting':
raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
% to_native(full_url))
for message in data.get('messages', []):
level = message['level']
if level.lower() == 'error':
display.error("Galaxy import error message: %s" % message['message'])
elif level.lower() == 'warning':
display.warning("Galaxy import warning message: %s" % message['message'])
else:
display.vvv("Galaxy import message: %s - %s" % (level, message['message']))
if state == 'failed':
code = to_native(data['error'].get('code', 'UNKNOWN'))
description = to_native(
data['error'].get('description', "Unknown error, see %s for more details" % full_url))
raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code))
@g_connect(['v2', 'v3'])
def get_collection_metadata(self, namespace, name):
"""
Gets the collection information from the Galaxy server about a specific Collection.
:param namespace: The collection namespace.
:param name: The collection name.
        :return: CollectionMetadata about the collection.
"""
if 'v3' in self.available_api_versions:
api_path = self.available_api_versions['v3']
field_map = [
('created_str', 'created_at'),
('modified_str', 'updated_at'),
]
else:
api_path = self.available_api_versions['v2']
field_map = [
('created_str', 'created'),
('modified_str', 'modified'),
]
info_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, '/')
error_context_msg = 'Error when getting the collection info for %s.%s from %s (%s)' \
% (namespace, name, self.name, self.api_server)
data = self._call_galaxy(info_url, error_context_msg=error_context_msg)
metadata = {}
for name, api_field in field_map:
metadata[name] = data.get(api_field, None)
return CollectionMetadata(namespace, name, **metadata)
@g_connect(['v2', 'v3'])
def get_collection_version_metadata(self, namespace, name, version):
"""
Gets the collection information from the Galaxy server about a specific Collection version.
:param namespace: The collection namespace.
:param name: The collection name.
:param version: Version of the collection to get the information for.
:return: CollectionVersionMetadata about the collection at the version requested.
"""
api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
n_collection_url = _urljoin(*url_paths)
error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
% (namespace, name, version, self.name, self.api_server)
data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
self._set_cache()
signatures = data.get('signatures') or []
return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
data['download_url'], data['artifact']['sha256'],
data['metadata']['dependencies'], data['href'], signatures)
@g_connect(['v2', 'v3'])
def get_collection_versions(self, namespace, name):
"""
Gets a list of available versions for a collection on a Galaxy server.
:param namespace: The collection namespace.
:param name: The collection name.
:return: A list of versions that are available.
"""
relative_link = False
if 'v3' in self.available_api_versions:
api_path = self.available_api_versions['v3']
pagination_path = ['links', 'next']
            relative_link = True  # AH pagination results are relative and not absolute URIs.
else:
api_path = self.available_api_versions['v2']
pagination_path = ['next']
page_size_name = 'limit' if 'v3' in self.available_api_versions else 'page_size'
versions_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/?%s=%d' % (page_size_name, COLLECTION_PAGE_SIZE))
versions_url_info = urlparse(versions_url)
cache_key = versions_url_info.path
# We should only rely on the cache if the collection has not changed. This may slow things down but it ensures
# we are not waiting a day before finding any new collections that have been published.
if self._cache:
server_cache = self._cache.setdefault(get_cache_id(versions_url), {})
modified_cache = server_cache.setdefault('modified', {})
try:
modified_date = self.get_collection_metadata(namespace, name).modified_str
except GalaxyError as err:
if err.http_code != 404:
raise
# No collection found, return an empty list to keep things consistent with the various APIs
return []
cached_modified_date = modified_cache.get('%s.%s' % (namespace, name), None)
if cached_modified_date != modified_date:
modified_cache['%s.%s' % (namespace, name)] = modified_date
if versions_url_info.path in server_cache:
del server_cache[cache_key]
self._set_cache()
error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
% (namespace, name, self.name, self.api_server)
try:
data = self._call_galaxy(versions_url, error_context_msg=error_context_msg, cache=True, cache_key=cache_key)
except GalaxyError as err:
if err.http_code != 404:
raise
            # v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
return []
if 'data' in data:
# v3 automation-hub is the only known API that uses `data`
# since v3 pulp_ansible does not, we cannot rely on version
# to indicate which key to use
results_key = 'data'
else:
results_key = 'results'
versions = []
while True:
versions += [v['version'] for v in data[results_key]]
next_link = data
for path in pagination_path:
next_link = next_link.get(path, {})
if not next_link:
break
elif relative_link:
# TODO: This assumes the pagination result is relative to the root server. Will need to be verified
# with someone who knows the AH API.
# Remove the query string from the versions_url to use the next_link's query
versions_url = urljoin(versions_url, urlparse(versions_url).path)
next_link = versions_url.replace(versions_url_info.path, next_link)
data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
error_context_msg=error_context_msg, cache=True, cache_key=cache_key)
self._set_cache()
return versions
@g_connect(['v2', 'v3'])
def get_collection_signatures(self, namespace, name, version):
"""
Gets the collection signatures from the Galaxy server about a specific Collection version.
:param namespace: The collection namespace.
:param name: The collection name.
:param version: Version of the collection to get the information for.
:return: A list of signature strings.
"""
api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
n_collection_url = _urljoin(*url_paths)
error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
% (namespace, name, version, self.name, self.api_server)
data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
self._set_cache()
signatures = [signature_info["signature"] for signature_info in data.get("signatures") or []]
if not signatures:
display.vvvv(f"Server {self.api_server} has not signed {namespace}.{name}:{version}")
return signatures
| 40,169 | Python | .py | 768 | 40.604167 | 154 | 0.596814 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,054 | __init__.py | ansible_ansible/lib/ansible/galaxy/__init__.py |
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
""" This manages remote shared Ansible objects, mainly roles"""
from __future__ import annotations
import os
import ansible.constants as C
from ansible import context
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.common.yaml import yaml_load
# default_readme_template
# default_meta_template
def get_collections_galaxy_meta_info():
meta_path = os.path.join(os.path.dirname(__file__), 'data', 'collections_galaxy_meta.yml')
with open(to_bytes(meta_path, errors='surrogate_or_strict'), 'rb') as galaxy_obj:
return yaml_load(galaxy_obj)
class Galaxy(object):
""" Keeps global galaxy info """
def __init__(self):
        # TODO: eventually remove this as it contains a mishmash of properties that aren't really global
# roles_path needs to be a list and will be by default
roles_path = context.CLIARGS.get('roles_path', C.DEFAULT_ROLES_PATH)
# cli option handling is responsible for splitting roles_path
self.roles_paths = roles_path
self.roles = {}
# load data path for resource usage
this_dir, this_filename = os.path.split(__file__)
type_path = context.CLIARGS.get('role_type', 'default')
if type_path == 'default':
type_path = os.path.join(type_path, context.CLIARGS.get('type'))
self.DATA_PATH = os.path.join(this_dir, 'data', type_path)
@property
def default_role_skeleton_path(self):
return self.DATA_PATH
def add_role(self, role):
self.roles[role.name] = role
def remove_role(self, role_name):
del self.roles[role_name]
| 2,497 | Python | .py | 55 | 41.127273 | 103 | 0.669415 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,055 | user_agent.py | ansible_ansible/lib/ansible/galaxy/user_agent.py |
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import platform
import sys
from ansible.module_utils.ansible_release import __version__ as ansible_version
def user_agent():
"""Returns a user agent used by ansible-galaxy to include the Ansible version, platform and python version."""
python_version = sys.version_info
return u"ansible-galaxy/{ansible_version} ({platform}; python:{py_major}.{py_minor}.{py_micro})".format(
ansible_version=ansible_version,
platform=platform.system(),
py_major=python_version.major,
py_minor=python_version.minor,
py_micro=python_version.micro,
)
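# Example output (illustrative; the actual values depend on the local environment):
#   user_agent()  -> 'ansible-galaxy/2.18.0 (Linux; python:3.12.3)'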
| 760 | Python | .py | 16 | 42.625 | 114 | 0.726287 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,056 | example.py.j2 | ansible_ansible/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 |
#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible.errors import AnsibleError
try:
    from ansible.plugins.netconf import NetconfBase
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
"""
except ImportError:
raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
class Netconf(NetconfBase):
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
"""
raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Not implemented")
| 1,511 | Python | .py | 37 | 37.27027 | 94 | 0.736914 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,057 | example.py.j2 | ansible_ansible/lib/ansible/galaxy/data/network/module_utils/example.py.j2 |
#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible.errors import AnsibleError
### Imports
try:
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.connection import Connection
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos//junos.py
"""
except ImportError:
raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
### Implementation
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos//junos.py
"""
| 1,538 | Python | .py | 38 | 37.526316 | 106 | 0.752336 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,058 | example_facts.py.j2 | ansible_ansible/lib/ansible/galaxy/data/network/library/example_facts.py.j2 |
#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible.errors import AnsibleError
### Documentation
DOCUMENTATION = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
"""
EXAMPLES = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
"""
RETURN = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
"""
### Imports
try:
from ansible.module_utils.basic import AnsibleModule
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
"""
except ImportError:
raise AnsibleError("[ {{ role_name }}_facts ]: Dependency not satisfied")
### Implementation
def main():
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
"""
raise AnsibleError(" [ {{ role_name }}_facts ]: Not Implemented")
### Entrypoint
if __name__ == '__main__':
main()
| 2,345 | Python | .py | 58 | 36.810345 | 106 | 0.733655 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,059 | example_config.py.j2 | ansible_ansible/lib/ansible/galaxy/data/network/library/example_config.py.j2 |
#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible.errors import AnsibleError
### Documentation
DOCUMENTATION = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
"""
EXAMPLES = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
"""
RETURN = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
"""
### Imports
try:
from ansible.module_utils.basic import AnsibleModule
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
"""
except ImportError:
raise AnsibleError("[ {{ role_name }}_config ]: Dependency not satisfied")
### Implementation
def main():
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
"""
raise AnsibleError(" [ {{ role_name }}_config ]: Not Implemented")
### Entrypoint
if __name__ == '__main__':
main()
| 2,357 | Python | .py | 58 | 37.017241 | 107 | 0.73505 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,060 | example_command.py.j2 | ansible_ansible/lib/ansible/galaxy/data/network/library/example_command.py.j2 |
#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible.errors import AnsibleError
### Documentation
DOCUMENTATION = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
"""
EXAMPLES = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
"""
RETURN = """
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
"""
#### Imports
try:
from ansible.module_utils.basic import AnsibleModule
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
"""
except ImportError:
raise AnsibleError("[ {{ role_name }}_command ]: Dependency not satisfied")
#### Implementation
def main():
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
"""
raise AnsibleError(" [ {{ role_name }}_command ]: Not Implemented")
#### Entrypoint
if __name__ == '__main__':
main()
| 2,372 | Python | .py | 58 | 37.275862 | 108 | 0.735473 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,061 | example.py.j2 | ansible_ansible/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 |
#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible.errors import AnsibleError
try:
from ansible.plugins.cliconf import CliconfBase
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py
"""
except ImportError:
raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
class Cliconf(CliconfBase):
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py
"""
raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Not implemented")
| 1,511 | Python | .py | 37 | 37.27027 | 94 | 0.736234 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,062 | example.py.j2 | ansible_ansible/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 |
#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from ansible.errors import AnsibleError
try:
from ansible.plugins.terminal import TerminalBase
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py
"""
except ImportError:
raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Dependency not satisfied")
class TerminalModule(TerminalBase):
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py
"""
raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Not implemented")
| 1,526 | Python | .py | 37 | 37.675676 | 95 | 0.739569 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,063 | errors.py | ansible_ansible/lib/ansible/galaxy/dependency_resolution/errors.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Dependency resolution exceptions."""
from __future__ import annotations
try:
from resolvelib.resolvers import ( # pylint: disable=unused-import
ResolutionImpossible as CollectionDependencyResolutionImpossible,
InconsistentCandidate as CollectionDependencyInconsistentCandidate,
)
except ImportError:
class CollectionDependencyResolutionImpossible(Exception): # type: ignore[no-redef]
pass
class CollectionDependencyInconsistentCandidate(Exception): # type: ignore[no-redef]
pass
| 697 | Python | .py | 15 | 42.066667 | 92 | 0.762887 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,064 | reporters.py | ansible_ansible/lib/ansible/galaxy/dependency_resolution/reporters.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Requirement reporter implementations."""
from __future__ import annotations
try:
from resolvelib import BaseReporter
except ImportError:
class BaseReporter: # type: ignore[no-redef]
pass
class CollectionDependencyReporter(BaseReporter):
"""A dependency reporter for Ansible Collections.
This is a proxy class allowing us to abstract away importing resolvelib
outside of the `ansible.galaxy.dependency_resolution` Python package.
"""
| 635 | Python | .py | 15 | 38.866667 | 92 | 0.754472 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,065 | versioning.py | ansible_ansible/lib/ansible/galaxy/dependency_resolution/versioning.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2019-2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Version comparison helpers."""
from __future__ import annotations
import operator
from ansible.module_utils.compat.version import LooseVersion
from ansible.utils.version import SemanticVersion
def is_pre_release(version):
# type: (str) -> bool
"""Figure out if a given version is a pre-release."""
try:
return SemanticVersion(version).is_prerelease
except ValueError:
return False
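# Illustrative examples:
#   is_pre_release('1.0.0-beta.1')  -> True
#   is_pre_release('1.0.0')         -> False
#   is_pre_release('not-a-semver')  -> False  (the ValueError is swallowed)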
def meets_requirements(version, requirements):
# type: (str, str) -> bool
"""Verify if a given version satisfies all the requirements.
Supported version identifiers are:
* '=='
* '!='
* '>'
* '>='
* '<'
* '<='
* '*'
Each requirement is delimited by ','.
"""
op_map = {
'!=': operator.ne,
'==': operator.eq,
'=': operator.eq,
'>=': operator.ge,
'>': operator.gt,
'<=': operator.le,
'<': operator.lt,
}
for req in requirements.split(','):
op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
op = op_map.get(req[:op_pos])
requirement = req[op_pos:]
if not op:
requirement = req
op = operator.eq
if requirement == '*' or version == '*':
continue
if not op(
SemanticVersion(version),
SemanticVersion.from_loose_version(LooseVersion(requirement)),
):
break
else:
return True
# The loop was broken early, it does not meet all the requirements
return False
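# Illustrative examples:
#   meets_requirements('1.5.0', '>=1.0.0,<2.0.0')  -> True
#   meets_requirements('2.1.0', '>=1.0.0,<2.0.0')  -> False  (fails '<2.0.0')
#   meets_requirements('1.5.0', '*')               -> True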
| 1,726 | Python | .py | 55 | 24.345455 | 92 | 0.581171 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,066 | __init__.py | ansible_ansible/lib/ansible/galaxy/dependency_resolution/__init__.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Dependency resolution machinery."""
from __future__ import annotations
import typing as t
if t.TYPE_CHECKING:
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.dependency_resolution.dataclasses import Candidate
from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
from ansible.galaxy.dependency_resolution.providers import CollectionDependencyProvider
from ansible.galaxy.dependency_resolution.reporters import CollectionDependencyReporter
from ansible.galaxy.dependency_resolution.resolvers import CollectionDependencyResolver
def build_collection_dependency_resolver(
galaxy_apis, # type: t.Iterable[GalaxyAPI]
concrete_artifacts_manager, # type: ConcreteArtifactsManager
preferred_candidates=None, # type: t.Iterable[Candidate]
with_deps=True, # type: bool
with_pre_releases=False, # type: bool
upgrade=False, # type: bool
include_signatures=True, # type: bool
offline=False, # type: bool
): # type: (...) -> CollectionDependencyResolver
"""Return a collection dependency resolver.
The returned instance will have a ``resolve()`` method for
further consumption.
"""
return CollectionDependencyResolver(
CollectionDependencyProvider(
apis=MultiGalaxyAPIProxy(galaxy_apis, concrete_artifacts_manager, offline=offline),
concrete_artifacts_manager=concrete_artifacts_manager,
preferred_candidates=preferred_candidates,
with_deps=with_deps,
with_pre_releases=with_pre_releases,
upgrade=upgrade,
include_signatures=include_signatures,
),
CollectionDependencyReporter(),
)
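# Illustrative usage sketch (the Galaxy API and artifacts-manager objects are
# assumed to be constructed elsewhere):
#   resolver = build_collection_dependency_resolver(
#       galaxy_apis=[galaxy_api],
#       concrete_artifacts_manager=artifacts_manager,
#       upgrade=True,
#   )
#   result = resolver.resolve(requirements)  # standard resolvelib Resolver API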
| 1,996 | Python | .py | 42 | 41.119048 | 95 | 0.73549 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,067 | resolvers.py | ansible_ansible/lib/ansible/galaxy/dependency_resolution/resolvers.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Requirement resolver implementations."""
from __future__ import annotations
try:
from resolvelib import Resolver
except ImportError:
class Resolver: # type: ignore[no-redef]
pass
class CollectionDependencyResolver(Resolver):
"""A dependency resolver for Ansible Collections.
This is a proxy class allowing us to abstract away importing resolvelib
outside of the `ansible.galaxy.dependency_resolution` Python package.
"""
| 623 | Python | .py | 15 | 38.066667 | 92 | 0.749585 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) | 14,068 | providers.py | ansible_ansible/lib/ansible/galaxy/dependency_resolution/providers.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Requirement provider interfaces."""
from __future__ import annotations
import functools
import typing as t
if t.TYPE_CHECKING:
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection.gpg import get_signature_from_source
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate,
Requirement,
)
from ansible.galaxy.dependency_resolution.versioning import (
is_pre_release,
meets_requirements,
)
from ansible.module_utils.six import string_types
from ansible.utils.version import SemanticVersion, LooseVersion
try:
from resolvelib import AbstractProvider
from resolvelib import __version__ as resolvelib_version
except ImportError:
class AbstractProvider: # type: ignore[no-redef]
pass
resolvelib_version = '0.0.0'
# TODO: add python requirements to ansible-test's ansible-core distribution info and remove the hardcoded lowerbound/upperbound fallback
RESOLVELIB_LOWERBOUND = SemanticVersion("0.5.3")
RESOLVELIB_UPPERBOUND = SemanticVersion("1.1.0")
RESOLVELIB_VERSION = SemanticVersion.from_loose_version(LooseVersion(resolvelib_version))
class CollectionDependencyProviderBase(AbstractProvider):
"""Delegate providing a requirement interface for the resolver."""
def __init__(
self, # type: CollectionDependencyProviderBase
apis, # type: MultiGalaxyAPIProxy
concrete_artifacts_manager=None, # type: ConcreteArtifactsManager
preferred_candidates=None, # type: t.Iterable[Candidate]
with_deps=True, # type: bool
with_pre_releases=False, # type: bool
upgrade=False, # type: bool
include_signatures=True, # type: bool
): # type: (...) -> None
r"""Initialize helper attributes.
:param api: An instance of the multiple Galaxy APIs wrapper.
:param concrete_artifacts_manager: An instance of the caching \
concrete artifacts manager.
:param with_deps: A flag specifying whether the resolver \
should attempt to pull-in the deps of the \
requested requirements. On by default.
:param with_pre_releases: A flag specifying whether the \
resolver should skip pre-releases. \
Off by default.
:param upgrade: A flag specifying whether the resolver should \
skip matching versions that are not upgrades. \
Off by default.
:param include_signatures: A flag to determine whether to retrieve \
signatures from the Galaxy APIs and \
include signatures in matching Candidates. \
On by default.
"""
self._api_proxy = apis
self._make_req_from_dict = functools.partial(
Requirement.from_requirement_dict,
art_mgr=concrete_artifacts_manager,
)
self._preferred_candidates = set(preferred_candidates or ())
self._with_deps = with_deps
self._with_pre_releases = with_pre_releases
self._upgrade = upgrade
self._include_signatures = include_signatures
def identify(self, requirement_or_candidate):
# type: (t.Union[Candidate, Requirement]) -> str
"""Given requirement or candidate, return an identifier for it.
This is used to identify a requirement or candidate, e.g.
whether two requirements should have their specifier parts
(version ranges or pins) merged, whether two candidates would
conflict with each other (because they have same name but
different versions).
"""
return requirement_or_candidate.canonical_package_id
def get_preference(self, *args, **kwargs):
# type: (t.Any, t.Any) -> t.Union[float, int]
"""Return sort key function return value for given requirement.
This result should be based on preference that is defined as
"I think this requirement should be resolved first".
The lower the return value is, the more preferred this
group of arguments is.
resolvelib >=0.5.3, <0.7.0
:param resolution: Currently pinned candidate, or ``None``.
:param candidates: A list of possible candidates.
:param information: A list of requirement information.
Each ``information`` instance is a named tuple with two entries:
* ``requirement`` specifies a requirement contributing to
the current candidate list
* ``parent`` specifies the candidate whose dependencies
produced this requirement, or ``None``
to indicate a root requirement.
resolvelib >=0.7.0, <0.8.0
:param identifier: The value returned by ``identify()``.
:param resolutions: Mapping of identifier, candidate pairs.
:param candidates: Possible candidates for the identifier.
Mapping of identifier, list of candidate pairs.
:param information: Requirement information of each package.
Mapping of identifier, list of named tuple pairs.
The named tuples have the entries ``requirement`` and ``parent``.
resolvelib >=0.8.0, <=1.0.1
:param identifier: The value returned by ``identify()``.
:param resolutions: Mapping of identifier, candidate pairs.
:param candidates: Possible candidates for the identifier.
Mapping of identifier, list of candidate pairs.
:param information: Requirement information of each package.
Mapping of identifier, list of named tuple pairs.
The named tuples have the entries ``requirement`` and ``parent``.
:param backtrack_causes: Sequence of requirement information that were
the requirements that caused the resolver to most recently backtrack.
The preference could depend on various issues, including
(not necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should
probably be worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this
requirement? Those with few left should likely be worked on
first, I guess?
* Are there any known conflicts for this requirement?
We should probably work on those with the most
known conflicts.
A sortable value should be returned (this will be used as the
`key` parameter of the built-in sorting function). The smaller
the value is, the more preferred this requirement is (i.e. the
sorting function is called with ``reverse=False``).
"""
raise NotImplementedError
def _get_preference(self, candidates):
# type: (list[Candidate]) -> t.Union[float, int]
if any(
candidate in self._preferred_candidates
for candidate in candidates
):
# NOTE: Prefer pre-installed candidates over newer versions
# NOTE: available from Galaxy or other sources.
return float('-inf')
return len(candidates)
def find_matches(self, *args, **kwargs):
# type: (t.Any, t.Any) -> list[Candidate]
r"""Find all possible candidates satisfying given requirements.
This tries to get candidates based on the requirements' types.
For concrete requirements (SCM, dir, namespace dir, local or
remote archives), the one-and-only match is returned.
For a "named" requirement, Galaxy-compatible APIs are consulted
to find concrete candidates for this requirement. If there's a
pre-installed candidate, it's prepended in front of others.
resolvelib >=0.5.3, <0.6.0
:param requirements: A collection of requirements which all of \
the returned candidates must match. \
All requirements are guaranteed to have \
the same identifier. \
The collection is never empty.
resolvelib >=0.6.0
:param identifier: The value returned by ``identify()``.
:param requirements: The requirements all returned candidates must satisfy.
Mapping of identifier, iterator of requirement pairs.
:param incompatibilities: Incompatible versions that must be excluded
from the returned list.
:returns: An iterable that orders candidates by preference, \
e.g. the most preferred candidate comes first.
"""
raise NotImplementedError
def _find_matches(self, requirements):
# type: (list[Requirement]) -> list[Candidate]
# FIXME: The first requirement may be a Git repo followed by
# FIXME: its cloned tmp dir. Using only the first one creates
# FIXME: loops that prevent any further dependency exploration.
# FIXME: We need to figure out how to prevent this.
first_req = requirements[0]
fqcn = first_req.fqcn
# The fqcn is guaranteed to be the same for every requirement in the list
version_req = "A SemVer-compliant version or '*' is required. See https://semver.org to learn how to compose it correctly. "
version_req += "This is an issue with the collection."
# If we're upgrading collections, we can't calculate preinstalled_candidates until the latest matches are found.
# Otherwise, we can potentially avoid a Galaxy API call by doing this first.
preinstalled_candidates = set()
if not self._upgrade and first_req.type == 'galaxy':
preinstalled_candidates = {
candidate for candidate in self._preferred_candidates
if candidate.fqcn == fqcn and
all(self.is_satisfied_by(requirement, candidate) for requirement in requirements)
}
try:
coll_versions = [] if preinstalled_candidates else self._api_proxy.get_collection_versions(first_req) # type: t.Iterable[t.Tuple[str, GalaxyAPI]]
except TypeError as exc:
if first_req.is_concrete_artifact:
# Non-hashable versions will cause a TypeError
raise ValueError(
f"Invalid version found for the collection '{first_req}'. {version_req}"
) from exc
# Unexpected error from a Galaxy server
raise
if first_req.is_concrete_artifact:
# FIXME: do we assume that all the following artifacts are also concrete?
# FIXME: does using fqcn==None cause us problems here?
# Ensure the version found in the concrete artifact is SemVer-compliant
for version, req_src in coll_versions:
version_err = f"Invalid version found for the collection '{first_req}': {version} ({type(version)}). {version_req}"
# NOTE: The known cases causing the version to be a non-string object come from
# NOTE: the differences in how the YAML parser normalizes ambiguous values and
# NOTE: how the end-users sometimes expect them to be parsed. Unless the users
# NOTE: explicitly use the double quotes of one of the multiline string syntaxes
# NOTE: in the collection metadata file, PyYAML will parse a value containing
# NOTE: two dot-separated integers as `float`, a single integer as `int`, and 3+
# NOTE: integers as a `str`. In some cases, they may also use an empty value
# NOTE: which is normalized as `null` and turned into `None` in the Python-land.
# NOTE: Another known mistake is specifying only the major and minor
# NOTE: parts of the SemVer notation, skipping the "patch" bit (e.g.
# NOTE: "1.0"), which remains non-compliant even after the
# NOTE: conversion to string.
if not isinstance(version, string_types):
raise ValueError(version_err)
elif version != '*':
try:
SemanticVersion(version)
except ValueError as ex:
raise ValueError(version_err) from ex
return [
Candidate(fqcn, version, _none_src_server, first_req.type, None)
for version, _none_src_server in coll_versions
]
latest_matches = []
signatures = []
extra_signature_sources = [] # type: list[str]
discarding_pre_releases_acceptable = any(
not is_pre_release(candidate_version)
for candidate_version, _src_server in coll_versions
)
# NOTE: The optimization of conditionally looping over the requirements
# NOTE: is used to skip having to compute the pinned status of all
# NOTE: requirements and apply version normalization to the found ones.
all_pinned_requirement_version_numbers = {
# NOTE: Pinned versions can start with a number, but also with an
# NOTE: equals sign. Stripping it at the beginning should be
# NOTE: enough. If there's a space after equals, the second strip
# NOTE: will take care of it.
# NOTE: Without this conversion, requirements versions like
# NOTE: '1.2.3-alpha.4' work, but '=1.2.3-alpha.4' don't.
requirement.ver.lstrip('=').strip()
for requirement in requirements
if requirement.is_pinned
} if discarding_pre_releases_acceptable else set()
for version, src_server in coll_versions:
tmp_candidate = Candidate(fqcn, version, src_server, 'galaxy', None)
for requirement in requirements:
candidate_satisfies_requirement = self.is_satisfied_by(
requirement, tmp_candidate,
)
if not candidate_satisfies_requirement:
break
should_disregard_pre_release_candidate = (
# NOTE: Do not discard pre-release candidates in the
# NOTE: following cases:
# NOTE: * the end-user requested pre-releases explicitly;
# NOTE: * the candidate is a concrete artifact (e.g. a
# NOTE: Git repository, subdirs, a tarball URL, or a
# NOTE: local dir or file etc.);
# NOTE: * the candidate's pre-release version exactly
# NOTE: matches a version specifically requested by one
# NOTE: of the requirements in the current match
# NOTE: discovery round (i.e. matching a requirement
# NOTE: that is not a range but an explicit specific
# NOTE: version pin). This works when some requirements
# NOTE: request version ranges but others (possibly on
# NOTE: different dependency tree level depths) demand
# NOTE: pre-release dependency versions, even if those
# NOTE: dependencies are transitive.
is_pre_release(tmp_candidate.ver)
and discarding_pre_releases_acceptable
and not (
self._with_pre_releases
or tmp_candidate.is_concrete_artifact
or version in all_pinned_requirement_version_numbers
)
)
if should_disregard_pre_release_candidate:
break
# FIXME
# candidate_is_from_requested_source = (
# requirement.src is None # if this is true for some candidates but not all it will break the key param - NoneType can't be compared to str
# or requirement.src == candidate.src
# )
# if not candidate_is_from_requested_source:
# break
if not self._include_signatures:
continue
extra_signature_sources.extend(requirement.signature_sources or [])
else: # candidate satisfies requirements, `break` never happened
if self._include_signatures:
for extra_source in extra_signature_sources:
signatures.append(get_signature_from_source(extra_source))
latest_matches.append(
Candidate(fqcn, version, src_server, 'galaxy', frozenset(signatures))
)
latest_matches.sort(
key=lambda candidate: (
SemanticVersion(candidate.ver), candidate.src,
),
reverse=True, # prefer newer versions over older ones
)
if not preinstalled_candidates:
preinstalled_candidates = {
candidate for candidate in self._preferred_candidates
if candidate.fqcn == fqcn and
(
# check if an upgrade is necessary
all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) and
(
not self._upgrade or
# check if an upgrade is preferred
all(SemanticVersion(latest.ver) <= SemanticVersion(candidate.ver) for latest in latest_matches)
)
)
}
return list(preinstalled_candidates) + latest_matches
def is_satisfied_by(self, requirement, candidate):
# type: (Requirement, Candidate) -> bool
r"""Whether the given requirement is satisfiable by a candidate.
:param requirement: A requirement that produced the `candidate`.
:param candidate: A pinned candidate supposedly matching the \
`requirement` specifier. It is guaranteed to \
have been generated from the `requirement`.
:returns: Indication whether the `candidate` is a viable \
solution to the `requirement`.
"""
# NOTE: This is a set of Pipenv-inspired optimizations. Ref:
# https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74
if (
requirement.is_virtual or
candidate.is_virtual or
requirement.ver == '*'
):
return True
return meets_requirements(
version=candidate.ver,
requirements=requirement.ver,
)
def get_dependencies(self, candidate):
# type: (Candidate) -> list[Candidate]
r"""Get direct dependencies of a candidate.
:returns: A collection of requirements that `candidate` \
specifies as its dependencies.
"""
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
# NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collection_dependencies()` call because
# NOTE: internally it populates the artifact URL of the candidate,
# NOTE: its SHA hash and the Galaxy API token. These are still
# NOTE: necessary with `--no-deps` because even with the disabled
# NOTE: dependency resolution the outer layer will still need to
# NOTE: know how to download and validate the artifact.
#
# NOTE: Virtual candidates should always return dependencies
# NOTE: because they are ephemeral and non-installable.
if not self._with_deps and not candidate.is_virtual:
return []
return [
self._make_req_from_dict({'name': dep_name, 'version': dep_req})
for dep_name, dep_req in req_map.items()
]
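# Illustrative sketch (not part of the original module): the non-virtual,
# non-wildcard branch of `is_satisfied_by()` above delegates to
# `meets_requirements()` (imported at the top of this file), which checks a
# version against a comma-delimited specifier string. The sample versions
# and specifiers below are made up.
def _demo_range_check():
    assert meets_requirements(version='1.5.0', requirements='>=1.0.0,<2.0.0')
    assert not meets_requirements(version='2.1.0', requirements='>=1.0.0,<2.0.0')
    # A '*' specifier matches anything and is also short-circuited in
    # is_satisfied_by() before meets_requirements() is ever consulted.
    assert meets_requirements(version='0.1.0', requirements='*')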
# Classes to handle resolvelib API changes between minor versions for 0.X
class CollectionDependencyProvider050(CollectionDependencyProviderBase):
def find_matches(self, requirements): # type: ignore[override]
# type: (list[Requirement]) -> list[Candidate]
return self._find_matches(requirements)
def get_preference(self, resolution, candidates, information): # type: ignore[override]
# type: (t.Optional[Candidate], list[Candidate], list[t.NamedTuple]) -> t.Union[float, int]
return self._get_preference(candidates)
class CollectionDependencyProvider060(CollectionDependencyProviderBase):
def find_matches(self, identifier, requirements, incompatibilities): # type: ignore[override]
# type: (str, t.Mapping[str, t.Iterator[Requirement]], t.Mapping[str, t.Iterator[Requirement]]) -> list[Candidate]
return [
match for match in self._find_matches(list(requirements[identifier]))
if not any(match.ver == incompat.ver for incompat in incompatibilities[identifier])
]
def get_preference(self, resolution, candidates, information): # type: ignore[override]
# type: (t.Optional[Candidate], list[Candidate], list[t.NamedTuple]) -> t.Union[float, int]
return self._get_preference(candidates)
class CollectionDependencyProvider070(CollectionDependencyProvider060):
def get_preference(self, identifier, resolutions, candidates, information): # type: ignore[override]
# type: (str, t.Mapping[str, Candidate], t.Mapping[str, t.Iterator[Candidate]], t.Iterator[t.NamedTuple]) -> t.Union[float, int]
return self._get_preference(list(candidates[identifier]))
class CollectionDependencyProvider080(CollectionDependencyProvider060):
def get_preference(self, identifier, resolutions, candidates, information, backtrack_causes): # type: ignore[override]
# type: (str, t.Mapping[str, Candidate], t.Mapping[str, t.Iterator[Candidate]], t.Iterator[t.NamedTuple], t.Sequence) -> t.Union[float, int]
return self._get_preference(list(candidates[identifier]))
def _get_provider(): # type: () -> t.Type[CollectionDependencyProviderBase]
if RESOLVELIB_VERSION >= SemanticVersion("0.8.0"):
return CollectionDependencyProvider080
if RESOLVELIB_VERSION >= SemanticVersion("0.7.0"):
return CollectionDependencyProvider070
if RESOLVELIB_VERSION >= SemanticVersion("0.6.0"):
return CollectionDependencyProvider060
return CollectionDependencyProvider050
CollectionDependencyProvider = _get_provider()
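# Illustrative usage sketch (not part of the original module): wiring the
# version-appropriate provider selected above into resolvelib's documented
# Resolver API. The `galaxy_api_proxy`, `requirements` and
# `concrete_artifacts_manager` arguments are assumed to be prepared by the
# caller (e.g. ansible-galaxy's install code paths).
def _demo_resolve(galaxy_api_proxy, requirements, concrete_artifacts_manager):
    from resolvelib import BaseReporter, Resolver
    provider = CollectionDependencyProvider(
        apis=galaxy_api_proxy,
        concrete_artifacts_manager=concrete_artifacts_manager,
    )
    resolver = Resolver(provider, BaseReporter())
    # The result's `mapping` attribute maps each FQCN to its pinned Candidate.
    return resolver.resolve(requirements).mapping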
| 23,419 | Python | .py | 410 | 44.792683 | 158 | 0.635634 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,069 | dataclasses.py | ansible_ansible/lib/ansible/galaxy/dependency_resolution/dataclasses.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Dependency structs."""
# FIXME: add caching all over the place
from __future__ import annotations
import os
import typing as t
from collections import namedtuple
from collections.abc import MutableSequence, MutableMapping
from glob import iglob
from urllib.parse import urlparse
from yaml import safe_load
if t.TYPE_CHECKING:
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
Collection = t.TypeVar(
'Collection',
'Candidate', 'Requirement',
'_ComputedReqKindsMixin',
)
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import HAS_PACKAGING, PkgReq
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
_ALLOW_CONCRETE_POINTER_IN_SOURCE = False # NOTE: This is a feature flag
_GALAXY_YAML = b'galaxy.yml'
_MANIFEST_JSON = b'MANIFEST.json'
_SOURCE_METADATA_FILE = b'GALAXY.yml'
display = Display()
def get_validated_source_info(b_source_info_path, namespace, name, version):
source_info_path = to_text(b_source_info_path, errors='surrogate_or_strict')
if not os.path.isfile(b_source_info_path):
return None
try:
with open(b_source_info_path, mode='rb') as fd:
metadata = safe_load(fd)
except OSError as e:
display.warning(
f"Error getting collection source information at '{source_info_path}': {to_text(e, errors='surrogate_or_strict')}"
)
return None
if not isinstance(metadata, MutableMapping):
display.warning(f"Error getting collection source information at '{source_info_path}': expected a YAML dictionary")
return None
schema_errors = _validate_v1_source_info_schema(namespace, name, version, metadata)
if schema_errors:
display.warning(f"Ignoring source metadata file at {source_info_path} due to the following errors:")
display.warning("\n".join(schema_errors))
display.warning("Correct the source metadata file by reinstalling the collection.")
return None
return metadata
def _validate_v1_source_info_schema(namespace, name, version, provided_arguments):
argument_spec_data = dict(
format_version=dict(choices=["1.0.0"]),
download_url=dict(),
version_url=dict(),
server=dict(),
signatures=dict(
type=list,
suboptions=dict(
signature=dict(),
pubkey_fingerprint=dict(),
signing_service=dict(),
pulp_created=dict(),
)
),
name=dict(choices=[name]),
namespace=dict(choices=[namespace]),
version=dict(choices=[version]),
)
if not isinstance(provided_arguments, dict):
raise AnsibleError(
f'Invalid offline source info for {namespace}.{name}:{version}, expected a dict and got {type(provided_arguments)}'
)
validator = ArgumentSpecValidator(argument_spec_data)
validation_result = validator.validate(provided_arguments)
return validation_result.error_messages
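# Illustrative example (not part of the original module): a minimal metadata
# dict of the shape the schema above accepts. All values are made up; only
# the keys constrained by `choices` have to line up with the requested
# collection coordinates.
_EXAMPLE_SOURCE_INFO = {
    'format_version': '1.0.0',
    'namespace': 'ns',
    'name': 'coll',
    'version': '1.0.0',
    'server': 'https://galaxy.ansible.com',
    'download_url': 'https://galaxy.ansible.com/download/ns-coll-1.0.0.tar.gz',
    'signatures': [],
}
# _validate_v1_source_info_schema('ns', 'coll', '1.0.0', _EXAMPLE_SOURCE_INFO)
# is expected to return an empty list of error messages for input shaped
# like the above.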
def _is_collection_src_dir(dir_path):
b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
return os.path.isfile(os.path.join(b_dir_path, _GALAXY_YAML))
def _is_installed_collection_dir(dir_path):
b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
return os.path.isfile(os.path.join(b_dir_path, _MANIFEST_JSON))
def _is_collection_dir(dir_path):
return (
_is_installed_collection_dir(dir_path) or
_is_collection_src_dir(dir_path)
)
def _find_collections_in_subdirs(dir_path):
b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
subdir_glob_pattern = os.path.join(
b_dir_path,
# b'*', # namespace is supposed to be top-level per spec
b'*', # collection name
)
for subdir in iglob(subdir_glob_pattern):
if os.path.isfile(os.path.join(subdir, _MANIFEST_JSON)):
yield subdir
elif os.path.isfile(os.path.join(subdir, _GALAXY_YAML)):
yield subdir
def _is_collection_namespace_dir(tested_str):
return any(_find_collections_in_subdirs(tested_str))
def _is_file_path(tested_str):
return os.path.isfile(to_bytes(tested_str, errors='surrogate_or_strict'))
def _is_http_url(tested_str):
return urlparse(tested_str).scheme.lower() in {'http', 'https'}
def _is_git_url(tested_str):
return tested_str.startswith(('git+', 'git@'))
def _is_concrete_artifact_pointer(tested_str):
return any(
predicate(tested_str)
for predicate in (
# NOTE: Maintain the checks to be sorted from light to heavy:
_is_git_url,
_is_http_url,
_is_file_path,
_is_collection_dir,
_is_collection_namespace_dir,
)
)
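# Illustrative checks (not part of the original module) showing how the
# string-only predicates above classify typical inputs; the filesystem-backed
# checks (_is_file_path and the dir variants) naturally depend on what exists
# on disk. The URLs below are made up.
def _demo_pointer_classification():
    assert _is_git_url('git+https://github.com/ns/coll.git')
    assert _is_git_url('git@github.com:ns/coll.git')
    assert _is_http_url('https://example.com/ns-coll-1.0.0.tar.gz')
    assert not _is_git_url('https://example.com/ns-coll-1.0.0.tar.gz')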
class _ComputedReqKindsMixin:
UNIQUE_ATTRS = ('fqcn', 'ver', 'src', 'type')
def __init__(self, *args, **kwargs):
if not self.may_have_offline_galaxy_info:
self._source_info = None
else:
info_path = self.construct_galaxy_info_path(to_bytes(self.src, errors='surrogate_or_strict'))
self._source_info = get_validated_source_info(
info_path,
self.namespace,
self.name,
self.ver
)
def __hash__(self):
return hash(tuple(getattr(self, attr) for attr in _ComputedReqKindsMixin.UNIQUE_ATTRS))
def __eq__(self, candidate):
return hash(self) == hash(candidate)
@classmethod
def from_dir_path_as_unknown( # type: ignore[misc]
cls, # type: t.Type[Collection]
dir_path, # type: bytes
art_mgr, # type: ConcreteArtifactsManager
): # type: (...) -> Collection
"""Make collection from an unspecified dir type.
This alternative constructor attempts to grab metadata from the
given path if it's a directory. If there's no metadata, it
falls back to guessing the FQCN based on the directory path and
sets the version to "*".
It raises a ValueError immediately if the input is not an
existing directory path.
"""
if not os.path.isdir(dir_path):
raise ValueError(
"The collection directory '{path!s}' doesn't exist".
format(path=to_native(dir_path)),
)
try:
return cls.from_dir_path(dir_path, art_mgr)
except ValueError:
return cls.from_dir_path_implicit(dir_path)
@classmethod
def from_dir_path( # type: ignore[misc]
cls, # type: t.Type[Collection]
dir_path, # type: bytes
art_mgr, # type: ConcreteArtifactsManager
): # type: (...) -> Collection
"""Make collection from an directory with metadata."""
if dir_path.endswith(to_bytes(os.path.sep)):
dir_path = dir_path.rstrip(to_bytes(os.path.sep))
if not _is_collection_dir(dir_path):
display.warning(
u"Collection at '{path!s}' does not have a {manifest_json!s} "
u'file, nor a {galaxy_yml!s} file: cannot detect version.'.
format(
galaxy_yml=to_text(_GALAXY_YAML),
manifest_json=to_text(_MANIFEST_JSON),
path=to_text(dir_path, errors='surrogate_or_strict'),
),
)
raise ValueError(
'`dir_path` argument must be an installed or a source'
' collection directory.',
)
tmp_inst_req = cls(None, None, dir_path, 'dir', None)
req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
try:
req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req)
except TypeError as err:
# Looks like installed/source dir but isn't: doesn't have valid metadata.
display.warning(
u"Collection at '{path!s}' has a {manifest_json!s} "
u"or {galaxy_yml!s} file but it contains invalid metadata.".
format(
galaxy_yml=to_text(_GALAXY_YAML),
manifest_json=to_text(_MANIFEST_JSON),
path=to_text(dir_path, errors='surrogate_or_strict'),
),
)
raise ValueError(
"Collection at '{path!s}' has invalid metadata".
format(path=to_text(dir_path, errors='surrogate_or_strict'))
) from err
return cls(req_name, req_version, dir_path, 'dir', None)
@classmethod
def from_dir_path_implicit( # type: ignore[misc]
cls, # type: t.Type[Collection]
dir_path, # type: bytes
): # type: (...) -> Collection
"""Construct a collection instance based on an arbitrary dir.
This alternative constructor infers the FQCN based on the parent
and current directory names. It also sets the version to "*"
regardless of whether any of known metadata files are present.
"""
# There is no metadata, but it isn't required for a functional collection. Determine the namespace.name from the path.
if dir_path.endswith(to_bytes(os.path.sep)):
dir_path = dir_path.rstrip(to_bytes(os.path.sep))
u_dir_path = to_text(dir_path, errors='surrogate_or_strict')
path_list = u_dir_path.split(os.path.sep)
req_name = '.'.join(path_list[-2:])
return cls(req_name, '*', dir_path, 'dir', None) # type: ignore[call-arg]
@classmethod
def from_string(cls, collection_input, artifacts_manager, supplemental_signatures):
req = {}
if _is_concrete_artifact_pointer(collection_input) or AnsibleCollectionRef.is_valid_collection_name(collection_input):
# Arg is a file path or URL to a collection, or just a collection
req['name'] = collection_input
elif ':' in collection_input:
req['name'], _sep, req['version'] = collection_input.partition(':')
if not req['version']:
del req['version']
else:
if not HAS_PACKAGING:
raise AnsibleError("Failed to import packaging, check that a supported version is installed")
try:
pkg_req = PkgReq(collection_input)
except Exception as e:
# packaging doesn't know what this is, let it fly, better errors happen in from_requirement_dict
req['name'] = collection_input
else:
req['name'] = pkg_req.name
if pkg_req.specifier:
req['version'] = to_text(pkg_req.specifier)
req['signatures'] = supplemental_signatures
return cls.from_requirement_dict(req, artifacts_manager)
@classmethod
def from_requirement_dict(cls, collection_req, art_mgr, validate_signature_options=True):
req_name = collection_req.get('name', None)
req_version = collection_req.get('version', '*')
req_type = collection_req.get('type')
# TODO: decide how to deprecate the old src API behavior
req_source = collection_req.get('source', None)
req_signature_sources = collection_req.get('signatures', None)
if req_signature_sources is not None:
if validate_signature_options and art_mgr.keyring is None:
raise AnsibleError(
f"Signatures were provided to verify {req_name} but no keyring was configured."
)
if not isinstance(req_signature_sources, MutableSequence):
req_signature_sources = [req_signature_sources]
req_signature_sources = frozenset(req_signature_sources)
if req_type is None:
if ( # FIXME: decide on the future behavior:
_ALLOW_CONCRETE_POINTER_IN_SOURCE
and req_source is not None
and _is_concrete_artifact_pointer(req_source)
):
src_path = req_source
elif (
req_name is not None
and AnsibleCollectionRef.is_valid_collection_name(req_name)
):
req_type = 'galaxy'
elif (
req_name is not None
and _is_concrete_artifact_pointer(req_name)
):
src_path, req_name = req_name, None
else:
dir_tip_tmpl = ( # NOTE: leading LFs are for concat
'\n\nTip: Make sure you are pointing to the right '
'subdirectory — `{src!s}` looks like a directory '
'but it is neither a collection, nor a namespace '
'dir.'
)
if req_source is not None and os.path.isdir(req_source):
tip = dir_tip_tmpl.format(src=req_source)
elif req_name is not None and os.path.isdir(req_name):
tip = dir_tip_tmpl.format(src=req_name)
elif req_name:
tip = '\n\nCould not find {0}.'.format(req_name)
else:
tip = ''
raise AnsibleError( # NOTE: I'd prefer a ValueError instead
'Neither the collection requirement entry key '
"'name', nor 'source' point to a concrete "
"resolvable collection artifact. Also 'name' is "
'not an FQCN. A valid collection name must be in '
'the format <namespace>.<collection>. Please make '
'sure that the namespace and the collection name '
'contain characters from [a-zA-Z0-9_] only.'
'{extra_tip!s}'.format(extra_tip=tip),
)
if req_type is None:
if _is_git_url(src_path):
req_type = 'git'
req_source = src_path
elif _is_http_url(src_path):
req_type = 'url'
req_source = src_path
elif _is_file_path(src_path):
req_type = 'file'
req_source = src_path
elif _is_collection_dir(src_path):
if _is_installed_collection_dir(src_path) and _is_collection_src_dir(src_path):
# Note that ``download`` requires a dir with a ``galaxy.yml`` and fails if it
# doesn't exist, but if a ``MANIFEST.json`` also exists, it would be used
# instead of the ``galaxy.yml``.
raise AnsibleError(
u"Collection requirement at '{path!s}' has both a {manifest_json!s} "
u"file and a {galaxy_yml!s}.\nThe requirement must either be an installed "
u"collection directory or a source collection directory, not both.".
format(
path=to_text(src_path, errors='surrogate_or_strict'),
manifest_json=to_text(_MANIFEST_JSON),
galaxy_yml=to_text(_GALAXY_YAML),
)
)
req_type = 'dir'
req_source = src_path
elif _is_collection_namespace_dir(src_path):
req_name = None # No name for a virtual req or "namespace."?
req_type = 'subdirs'
req_source = src_path
else:
raise AnsibleError( # NOTE: this is never supposed to be hit
'Failed to automatically detect the collection '
'requirement type.',
)
if req_type not in {'file', 'galaxy', 'git', 'url', 'dir', 'subdirs'}:
raise AnsibleError(
"The collection requirement entry key 'type' must be "
'one of file, galaxy, git, dir, subdirs, or url.'
)
if req_name is None and req_type == 'galaxy':
raise AnsibleError(
'Collections requirement entry should contain '
"the key 'name' if it's requested from a Galaxy-like "
'index server.',
)
if req_type != 'galaxy' and req_source is None:
req_source, req_name = req_name, None
if (
req_type == 'galaxy' and
isinstance(req_source, GalaxyAPI) and
not _is_http_url(req_source.api_server)
):
raise AnsibleError(
"Collections requirement 'source' entry should contain "
'a valid Galaxy API URL but it does not: {not_url!s} '
'is not an HTTP URL.'.
format(not_url=req_source.api_server),
)
if req_type == 'dir' and req_source.endswith(os.path.sep):
req_source = req_source.rstrip(os.path.sep)
tmp_inst_req = cls(req_name, req_version, req_source, req_type, req_signature_sources)
if req_type not in {'galaxy', 'subdirs'} and req_name is None:
req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req) # TODO: fix the cache key in artifacts manager?
if req_type not in {'galaxy', 'subdirs'} and req_version == '*':
req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
return cls(
req_name, req_version,
req_source, req_type,
req_signature_sources,
)
def __repr__(self):
return (
'<{self!s} of type {coll_type!r} from {src!s}>'.
format(self=self, coll_type=self.type, src=self.src or 'Galaxy')
)
def __str__(self):
return to_native(self.__unicode__())
def __unicode__(self):
if self.fqcn is None:
return (
f'{self.type} collection from a Git repo' if self.is_scm
else f'{self.type} collection from a namespace'
)
return (
u'{fqcn!s}:{ver!s}'.
format(fqcn=to_text(self.fqcn), ver=to_text(self.ver))
)
@property
def may_have_offline_galaxy_info(self):
if self.fqcn is None:
# Virtual collection
return False
elif not self.is_dir or self.src is None or not _is_collection_dir(self.src):
# Not a dir or isn't on-disk
return False
return True
def construct_galaxy_info_path(self, b_collection_path):
if not self.may_have_offline_galaxy_info and not self.type == 'galaxy':
raise TypeError('Only installed collections from a Galaxy server have offline Galaxy info')
# Store Galaxy metadata adjacent to the namespace of the collection
# Chop off the last two parts of the path (/ns/coll) to get the dir containing the ns
b_src = to_bytes(b_collection_path, errors='surrogate_or_strict')
b_path_parts = b_src.split(to_bytes(os.path.sep))[0:-2]
b_metadata_dir = to_bytes(os.path.sep).join(b_path_parts)
# ns.coll-1.0.0.info
b_dir_name = to_bytes(f"{self.namespace}.{self.name}-{self.ver}.info", errors="surrogate_or_strict")
# collections/ansible_collections/ns.coll-1.0.0.info/GALAXY.yml
return os.path.join(b_metadata_dir, b_dir_name, _SOURCE_METADATA_FILE)
def _get_separate_ns_n_name(self): # FIXME: use LRU cache
return self.fqcn.split('.')
@property
def namespace(self):
if self.is_virtual:
raise TypeError(f'{self.type} collections do not have a namespace')
return self._get_separate_ns_n_name()[0]
@property
def name(self):
if self.is_virtual:
raise TypeError(f'{self.type} collections do not have a name')
return self._get_separate_ns_n_name()[-1]
@property
def canonical_package_id(self):
if not self.is_virtual:
return to_native(self.fqcn)
return (
'<virtual namespace from {src!s} of type {src_type!s}>'.
format(src=to_native(self.src), src_type=to_native(self.type))
)
@property
def is_virtual(self):
return self.is_scm or self.is_subdirs
@property
def is_file(self):
return self.type == 'file'
@property
def is_dir(self):
return self.type == 'dir'
@property
def namespace_collection_paths(self):
return [
to_native(path)
for path in _find_collections_in_subdirs(self.src)
]
@property
def is_subdirs(self):
return self.type == 'subdirs'
@property
def is_url(self):
return self.type == 'url'
@property
def is_scm(self):
return self.type == 'git'
@property
def is_concrete_artifact(self):
return self.type in {'git', 'url', 'file', 'dir', 'subdirs'}
@property
def is_online_index_pointer(self):
return not self.is_concrete_artifact
@property
def is_pinned(self):
"""Indicate if the version set is considered pinned.
This essentially computes whether the version field of the current
requirement explicitly requests a specific version and not an allowed
version range.
It is then used to help the resolvelib-based dependency resolver judge
whether it's acceptable to consider a pre-release candidate version
despite pre-release installs not being requested by the end-user
explicitly.
See https://github.com/ansible/ansible/pull/81606 for extra context.
"""
version_string = self.ver[0]
return version_string.isdigit() or not (
version_string == '*' or
version_string.startswith(('<', '>', '!='))
)
@property
def source_info(self):
return self._source_info
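# Illustrative sketch (not part of the original module): the `is_pinned`
# heuristic above only inspects the first character of the version string.
# _demo_is_pinned below mirrors that logic for standalone experimentation:
def _demo_is_pinned(version_string):
    first = version_string[0]
    return first.isdigit() or not (
        first == '*' or first.startswith(('<', '>', '!='))
    )
# _demo_is_pinned('1.2.3-alpha.4') -> True   (an explicit version pin)
# _demo_is_pinned('=1.2.3')        -> True   ('=' is neither '*' nor a range operator)
# _demo_is_pinned('>=1.0.0')       -> False  (a range, not a pin)
# _demo_is_pinned('*')             -> False  (anything goes)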
RequirementNamedTuple = namedtuple('Requirement', ('fqcn', 'ver', 'src', 'type', 'signature_sources')) # type: ignore[name-match]
CandidateNamedTuple = namedtuple('Candidate', ('fqcn', 'ver', 'src', 'type', 'signatures')) # type: ignore[name-match]
class Requirement(
_ComputedReqKindsMixin,
RequirementNamedTuple,
):
"""An abstract requirement request."""
def __new__(cls, *args, **kwargs):
self = RequirementNamedTuple.__new__(cls, *args, **kwargs)
return self
def __init__(self, *args, **kwargs):
super(Requirement, self).__init__()
class Candidate(
_ComputedReqKindsMixin,
CandidateNamedTuple,
):
"""A concrete collection candidate with its version resolved."""
def __new__(cls, *args, **kwargs):
self = CandidateNamedTuple.__new__(cls, *args, **kwargs)
return self
def __init__(self, *args, **kwargs):
super(Candidate, self).__init__()
def with_signatures_repopulated(self): # type: (Candidate) -> Candidate
"""Populate a new Candidate instance with Galaxy signatures.
:raises AnsibleAssertionError: If the supplied candidate is not sourced from a Galaxy-like index.
"""
if self.type != 'galaxy':
raise AnsibleAssertionError(f"Invalid collection type for {self!r}: unable to get signatures from a galaxy server.")
signatures = self.src.get_collection_signatures(self.namespace, self.name, self.ver)
return self.__class__(self.fqcn, self.ver, self.src, self.type, frozenset([*self.signatures, *signatures]))
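# Illustrative construction examples (not part of the original module). Both
# classes are named tuples underneath with the positional fields
# (fqcn, ver, src, type, signature_sources-or-signatures); the values below
# are made up:
def _demo_candidate_identity():
    req = Requirement('ns.coll', '>=1.0.0', None, 'galaxy', None)
    cand_a = Candidate('ns.coll', '1.2.0', None, 'galaxy', frozenset())
    cand_b = Candidate('ns.coll', '1.2.0', None, 'galaxy', frozenset({'sig'}))
    # Hash/equality are derived from UNIQUE_ATTRS (fqcn, ver, src, type), so
    # two candidates differing only in signatures still compare equal.
    assert cand_a == cand_b
    # '>=1.0.0' starts with a range operator, hence it is not a pin.
    assert not req.is_pinned
    return req, cand_a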
| 23,958 | Python | .py | 517 | 35.491296 | 130 | 0.599571 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,070 | gpg.py | ansible_ansible/lib/ansible/galaxy/collection/gpg.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2022, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Signature verification helpers."""
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.urls import open_url
import contextlib
import inspect
import os
import subprocess
import typing as t
from dataclasses import dataclass, fields as dc_fields
from urllib.error import HTTPError, URLError
if t.TYPE_CHECKING:
from ansible.utils.display import Display
def get_signature_from_source(source, display=None): # type: (str, t.Optional[Display]) -> str
if display is not None:
display.vvvv(f"Using signature at {source}")
try:
with open_url(
source,
http_agent=user_agent(),
validate_certs=True,
follow_redirects='safe'
) as resp:
signature = resp.read()
except (HTTPError, URLError) as e:
raise AnsibleError(
f"Failed to get signature for collection verification from '{source}': {e}"
) from e
return signature
def run_gpg_verify(
manifest_file, # type: str
signature, # type: str
keyring, # type: str
display, # type: Display
): # type: (...) -> tuple[str, int]
status_fd_read, status_fd_write = os.pipe()
# running the gpg command will create the keyring if it does not exist
remove_keybox = not os.path.exists(keyring)
cmd = [
'gpg',
f'--status-fd={status_fd_write}',
'--verify',
'--batch',
'--no-tty',
'--no-default-keyring',
f'--keyring={keyring}',
'-',
manifest_file,
]
cmd_str = ' '.join(cmd)
display.vvvv(f"Running command '{cmd}'")
try:
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
pass_fds=(status_fd_write,),
encoding='utf8',
)
except (FileNotFoundError, subprocess.SubprocessError) as err:
raise AnsibleError(
f"Failed during GnuPG verification with command '{cmd_str}': {err}"
) from err
else:
stdout, stderr = p.communicate(input=signature)
finally:
os.close(status_fd_write)
if remove_keybox:
with contextlib.suppress(OSError):
os.remove(keyring)
with os.fdopen(status_fd_read) as f:
stdout = f.read()
display.vvvv(
f"stdout: \n{stdout}\nstderr: \n{stderr}\n(exit code {p.returncode})"
)
return stdout, p.returncode
def parse_gpg_errors(status_out): # type: (str) -> t.Iterator[GpgBaseError]
for line in status_out.splitlines():
if not line:
continue
try:
_dummy, status, remainder = line.split(maxsplit=2)
except ValueError:
_dummy, status = line.split(maxsplit=1)
remainder = None
try:
cls = GPG_ERROR_MAP[status]
except KeyError:
continue
fields = [status]
if remainder:
fields.extend(
remainder.split(
None,
len(dc_fields(cls)) - 2
)
)
yield cls(*fields)
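# Illustrative example (not part of the original module): feeding a typical
# `--status-fd` line through the parser above. The keyid and username are
# made up.
def _demo_parse_badsig():
    status_out = '[GNUPG:] BADSIG 0123456789ABCDEF Example User <user@example.com>'
    gpg_error = next(parse_gpg_errors(status_out))
    assert isinstance(gpg_error, GpgBadSig)
    assert gpg_error.keyid == '0123456789ABCDEF'
    assert gpg_error.username == 'Example User <user@example.com>'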
@dataclass(frozen=True, slots=True)
class GpgBaseError(Exception):
status: str
@classmethod
def get_gpg_error_description(cls) -> str:
"""Return the current class description."""
return ' '.join(cls.__doc__.split())
def __post_init__(self):
for field_name, field_type in inspect.get_annotations(type(self), eval_str=True).items():
super(GpgBaseError, self).__setattr__(field_name, field_type(getattr(self, field_name)))
@dataclass(frozen=True, slots=True)
class GpgExpSig(GpgBaseError):
"""The signature with the keyid is good, but the signature is expired."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgExpKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by an expired key."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgRevKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by a revoked key."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgBadSig(GpgBaseError):
"""The signature with the keyid has not been verified okay."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgErrSig(GpgBaseError):
""""It was not possible to check the signature. This may be caused by
a missing public key or an unsupported algorithm. A RC of 4
indicates unknown algorithm, a 9 indicates a missing public
key.
"""
keyid: str
pkalgo: int
hashalgo: int
sig_class: str
time: int
rc: int
fpr: str
@dataclass(frozen=True, slots=True)
class GpgNoPubkey(GpgBaseError):
"""The public key is not available."""
keyid: str
@dataclass(frozen=True, slots=True)
class GpgMissingPassPhrase(GpgBaseError):
"""No passphrase was supplied."""
@dataclass(frozen=True, slots=True)
class GpgBadPassphrase(GpgBaseError):
"""The supplied passphrase was wrong or not given."""
keyid: str
@dataclass(frozen=True, slots=True)
class GpgNoData(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
- 2 :: Expected a packet but did not find one.
- 3 :: Invalid packet found, this may indicate a non OpenPGP
message.
- 4 :: Signature expected but not found.
"""
what: str
@dataclass(frozen=True, slots=True)
class GpgUnexpected(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
- 2 :: Expected a packet but did not find one.
- 3 :: Invalid packet found, this may indicate a non OpenPGP
message.
- 4 :: Signature expected but not found.
"""
what: str
@dataclass(frozen=True, slots=True)
class GpgError(GpgBaseError):
"""This is a generic error status message, it might be followed by error location specific data."""
location: str
code: int
more: str = ""
@dataclass(frozen=True, slots=True)
class GpgFailure(GpgBaseError):
"""This is the counterpart to SUCCESS and used to indicate a program failure."""
location: str
code: int
@dataclass(frozen=True, slots=True)
class GpgBadArmor(GpgBaseError):
"""The ASCII armor is corrupted."""
@dataclass(frozen=True, slots=True)
class GpgKeyExpired(GpgBaseError):
"""The key has expired."""
timestamp: int
@dataclass(frozen=True, slots=True)
class GpgKeyRevoked(GpgBaseError):
"""The used key has been revoked by its owner."""
@dataclass(frozen=True, slots=True)
class GpgNoSecKey(GpgBaseError):
"""The secret key is not available."""
keyid: str
GPG_ERROR_MAP = {
'EXPSIG': GpgExpSig,
'EXPKEYSIG': GpgExpKeySig,
'REVKEYSIG': GpgRevKeySig,
'BADSIG': GpgBadSig,
'ERRSIG': GpgErrSig,
'NO_PUBKEY': GpgNoPubkey,
'MISSING_PASSPHRASE': GpgMissingPassPhrase,
'BAD_PASSPHRASE': GpgBadPassphrase,
'NODATA': GpgNoData,
'UNEXPECTED': GpgUnexpected,
'ERROR': GpgError,
'FAILURE': GpgFailure,
'BADARMOR': GpgBadArmor,
'KEYEXPIRED': GpgKeyExpired,
'KEYREVOKED': GpgKeyRevoked,
'NO_SECKEY': GpgNoSecKey,
}
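# Illustrative end-to-end sketch (not part of the original module): verifying
# a manifest against a detached signature and translating the raw GPG status
# output into the dataclasses above. All arguments are assumed to be supplied
# by the caller (ansible-galaxy's verify/install code paths in practice).
def _demo_verify(manifest_file, detached_signature, keyring, display):
    status_out, rc = run_gpg_verify(manifest_file, detached_signature, keyring, display)
    if rc != 0:
        for gpg_error in parse_gpg_errors(status_out):
            display.warning(gpg_error.get_gpg_error_description())
    return rc == 0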
| 7,540 | Python | .py | 221 | 28.036199 | 103 | 0.657808 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,071 | concrete_artifact_manager.py | ansible_ansible/lib/ansible/galaxy/collection/concrete_artifact_manager.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Concrete collection candidate management helper module."""
from __future__ import annotations
import json
import os
import tarfile
import subprocess
import typing as t
import yaml
from contextlib import contextmanager
from hashlib import sha256
from urllib.error import URLError
from urllib.parse import urldefrag
from shutil import rmtree
from tempfile import mkdtemp
if t.TYPE_CHECKING:
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate, Collection, Requirement,
)
from ansible.galaxy.token import GalaxyToken
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.api import should_retry_error
from ansible.galaxy.dependency_resolution.dataclasses import _GALAXY_YAML
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.api import retry_with_delays_and_condition
from ansible.module_utils.api import generate_jittered_backoff
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.yaml import yaml_load
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
import ansible.constants as C
display = Display()
MANIFEST_FILENAME = 'MANIFEST.json'
class ConcreteArtifactsManager:
"""Manager for on-disk collection artifacts.
It is responsible for:
* downloading remote collections from Galaxy-compatible servers and
direct links to tarballs or SCM repositories
* keeping track of local ones
* keeping track of Galaxy API tokens for downloads from Galaxy-like
servers, as well as the artifact hashes
* keeping track of Galaxy API signatures for downloads from
Galaxy-like servers
* caching all of the above
* retrieving the metadata out of the downloaded artifacts
"""
def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60, required_signature_count=None, ignore_signature_errors=None):
# type: (bytes, bool, str, int, str, list[str]) -> None
"""Initialize ConcreteArtifactsManager caches and constraints."""
self._validate_certs = validate_certs # type: bool
self._artifact_cache = {} # type: dict[bytes, bytes]
self._galaxy_artifact_cache = {} # type: dict[Candidate | Requirement, bytes]
self._artifact_meta_cache = {} # type: dict[bytes, dict[str, str | list[str] | dict[str, str] | None | t.Type[Sentinel]]]
self._galaxy_collection_cache = {} # type: dict[Candidate | Requirement, tuple[str, str, GalaxyToken]]
self._galaxy_collection_origin_cache = {} # type: dict[Candidate, tuple[str, list[dict[str, str]]]]
self._b_working_directory = b_working_directory # type: bytes
self._supplemental_signature_cache = {} # type: dict[str, str]
self._keyring = keyring # type: str
self.timeout = timeout # type: int
self._required_signature_count = required_signature_count # type: str
self._ignore_signature_errors = ignore_signature_errors # type: list[str]
self._require_build_metadata = True # type: bool
@property
def keyring(self):
return self._keyring
@property
def required_successful_signature_count(self):
return self._required_signature_count
@property
def ignore_signature_errors(self):
if self._ignore_signature_errors is None:
return []
return self._ignore_signature_errors
@property
def require_build_metadata(self):
# type: () -> bool
return self._require_build_metadata
@require_build_metadata.setter
def require_build_metadata(self, value):
# type: (bool) -> None
self._require_build_metadata = value
def get_galaxy_artifact_source_info(self, collection):
# type: (Candidate) -> dict[str, t.Union[str, list[dict[str, str]]]]
server = collection.src.api_server
try:
download_url = self._galaxy_collection_cache[collection][0]
signatures_url, signatures = self._galaxy_collection_origin_cache[collection]
except KeyError as key_err:
raise RuntimeError(
'There is no known source for {coll!s}'.
format(coll=collection),
) from key_err
return {
"format_version": "1.0.0",
"namespace": collection.namespace,
"name": collection.name,
"version": collection.ver,
"server": server,
"version_url": signatures_url,
"download_url": download_url,
"signatures": signatures,
}
def get_galaxy_artifact_path(self, collection):
# type: (t.Union[Candidate, Requirement]) -> bytes
"""Given a Galaxy-stored collection, return a cached path.
If it's not yet on disk, this method downloads the artifact first.
"""
try:
return self._galaxy_artifact_cache[collection]
except KeyError:
pass
try:
url, sha256_hash, token = self._galaxy_collection_cache[collection]
except KeyError as key_err:
raise RuntimeError(
'There is no known source for {coll!s}'.
format(coll=collection),
) from key_err
display.vvvv(
"Fetching a collection tarball for '{collection!s}' from "
'Ansible Galaxy'.format(collection=collection),
)
try:
b_artifact_path = _download_file(
url,
self._b_working_directory,
expected_hash=sha256_hash,
validate_certs=self._validate_certs,
token=token,
) # type: bytes
except URLError as err:
raise AnsibleError(
'Failed to download collection tar '
"from '{coll_src!s}': {download_err!s}".
format(
coll_src=to_native(collection.src),
download_err=to_native(err),
),
) from err
except Exception as err:
raise AnsibleError(
'Failed to download collection tar '
"from '{coll_src!s}' due to the following unforeseen error: "
'{download_err!s}'.
format(
coll_src=to_native(collection.src),
download_err=to_native(err),
),
) from err
else:
display.vvv(
"Collection '{coll!s}' obtained from "
'server {server!s} {url!s}'.format(
coll=collection, server=collection.src or 'Galaxy',
url=collection.src.api_server if collection.src is not None
else '',
)
)
self._galaxy_artifact_cache[collection] = b_artifact_path
return b_artifact_path
def get_artifact_path(self, collection):
# type: (Collection) -> bytes
"""Given a concrete collection pointer, return a cached path.
If it's not yet on disk, this method downloads the artifact first.
"""
try:
return self._artifact_cache[collection.src]
except KeyError:
pass
# NOTE: SCM needs to be special-cased as it may contain either
# NOTE: one collection in its root, or a number of top-level
# NOTE: collection directories instead.
# NOTE: The idea is to store the SCM collection as unpacked
# NOTE: directory structure under the temporary location and use
# NOTE: a "virtual" collection that has pinned requirements on
# NOTE: the directories under that SCM checkout that correspond
# NOTE: to collections.
# NOTE: This brings us to the idea that we need two separate
# NOTE: virtual Requirement/Candidate types --
# NOTE: (single) dir + (multidir) subdirs
if collection.is_url:
display.vvvv(
"Collection requirement '{collection!s}' is a URL "
'to a tar artifact'.format(collection=collection.fqcn),
)
try:
b_artifact_path = _download_file(
collection.src,
self._b_working_directory,
expected_hash=None, # NOTE: URLs don't support checksums
validate_certs=self._validate_certs,
timeout=self.timeout
)
except Exception as err:
raise AnsibleError(
'Failed to download collection tar '
"from '{coll_src!s}': {download_err!s}".
format(
coll_src=to_native(collection.src),
download_err=to_native(err),
),
) from err
elif collection.is_scm:
b_artifact_path = _extract_collection_from_git(
collection.src,
collection.ver,
self._b_working_directory,
)
elif collection.is_file or collection.is_dir or collection.is_subdirs:
b_artifact_path = to_bytes(collection.src)
else:
# NOTE: This may happen `if collection.is_online_index_pointer`
raise RuntimeError(
'The artifact is of an unexpected type {art_type!s}'.
format(art_type=collection.type)
)
self._artifact_cache[collection.src] = b_artifact_path
return b_artifact_path
def get_artifact_path_from_unknown(self, collection):
# type: (Candidate) -> bytes
if collection.is_concrete_artifact:
return self.get_artifact_path(collection)
return self.get_galaxy_artifact_path(collection)
def _get_direct_collection_namespace(self, collection):
# type: (Candidate) -> t.Optional[str]
return self.get_direct_collection_meta(collection)['namespace'] # type: ignore[return-value]
def _get_direct_collection_name(self, collection):
# type: (Collection) -> t.Optional[str]
return self.get_direct_collection_meta(collection)['name'] # type: ignore[return-value]
def get_direct_collection_fqcn(self, collection):
# type: (Collection) -> t.Optional[str]
"""Extract FQCN from the given on-disk collection artifact.
If the collection is virtual, ``None`` is returned instead
of a string.
"""
if collection.is_virtual:
# NOTE: should it be something like "<virtual>"?
return None
return '.'.join(( # type: ignore[type-var]
self._get_direct_collection_namespace(collection), # type: ignore[arg-type]
self._get_direct_collection_name(collection),
))
def get_direct_collection_version(self, collection):
# type: (Collection) -> str
"""Extract version from the given on-disk collection artifact."""
return self.get_direct_collection_meta(collection)['version'] # type: ignore[return-value]
def get_direct_collection_dependencies(self, collection):
# type: (t.Union[Candidate, Requirement]) -> dict[str, str]
"""Extract deps from the given on-disk collection artifact."""
collection_dependencies = self.get_direct_collection_meta(collection)['dependencies']
if collection_dependencies is None:
collection_dependencies = {}
return collection_dependencies # type: ignore[return-value]
def get_direct_collection_meta(self, collection):
# type: (Collection) -> dict[str, t.Union[str, dict[str, str], list[str], None, t.Type[Sentinel]]]
"""Extract meta from the given on-disk collection artifact."""
try: # FIXME: use unique collection identifier as a cache key?
return self._artifact_meta_cache[collection.src]
except KeyError:
b_artifact_path = self.get_artifact_path(collection)
if collection.is_url or collection.is_file:
collection_meta = _get_meta_from_tar(b_artifact_path)
elif collection.is_dir: # should we just build a coll instead?
# FIXME: what if there's subdirs?
try:
collection_meta = _get_meta_from_dir(b_artifact_path, self.require_build_metadata)
except LookupError as lookup_err:
raise AnsibleError(
'Failed to find the collection dir deps: {err!s}'.
format(err=to_native(lookup_err)),
) from lookup_err
elif collection.is_scm:
collection_meta = {
'name': None,
'namespace': None,
'dependencies': {to_native(b_artifact_path): '*'},
'version': '*',
}
elif collection.is_subdirs:
collection_meta = {
'name': None,
'namespace': None,
# NOTE: Dropping b_artifact_path since it's based on src anyway
'dependencies': dict.fromkeys(
map(to_native, collection.namespace_collection_paths),
'*',
),
'version': '*',
}
else:
raise RuntimeError
self._artifact_meta_cache[collection.src] = collection_meta
return collection_meta
def save_collection_source(self, collection, url, sha256_hash, token, signatures_url, signatures):
# type: (Candidate, str, str, GalaxyToken, str, list[dict[str, str]]) -> None
"""Store collection URL, SHA256 hash and Galaxy API token.
This is a hook that is supposed to be called before attempting to
download Galaxy-based collections with ``get_galaxy_artifact_path()``.
"""
self._galaxy_collection_cache[collection] = url, sha256_hash, token
self._galaxy_collection_origin_cache[collection] = signatures_url, signatures
@classmethod
@contextmanager
def under_tmpdir(
cls,
temp_dir_base, # type: str
validate_certs=True, # type: bool
keyring=None, # type: str
required_signature_count=None, # type: str
ignore_signature_errors=None, # type: list[str]
require_build_metadata=True, # type: bool
): # type: (...) -> t.Iterator[ConcreteArtifactsManager]
"""Custom ConcreteArtifactsManager constructor with temp dir.
This method returns a context manager that allocates and cleans
up a temporary directory for caching the collection artifacts
during the dependency resolution process.
"""
# NOTE: Can't use `with tempfile.TemporaryDirectory:`
# NOTE: because it's not in Python 2 stdlib.
temp_path = mkdtemp(
dir=to_bytes(temp_dir_base, errors='surrogate_or_strict'),
)
b_temp_path = to_bytes(temp_path, errors='surrogate_or_strict')
try:
yield cls(
b_temp_path,
validate_certs,
keyring=keyring,
required_signature_count=required_signature_count,
ignore_signature_errors=ignore_signature_errors
)
finally:
rmtree(b_temp_path)
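# Illustrative usage sketch (not part of the original module): allocating a
# throwaway artifacts manager for the duration of a resolve/install run. The
# '/tmp' base directory is a placeholder.
def _demo_under_tmpdir():
    with ConcreteArtifactsManager.under_tmpdir('/tmp', validate_certs=True) as artifacts_manager:
        # The manager and its on-disk download cache only live inside this
        # block; the temporary directory is removed on exit.
        return artifacts_manager.require_build_metadata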
def parse_scm(collection, version):
"""Extract name, version, path and subdir out of the SCM pointer."""
if ',' in collection:
collection, version = collection.split(',', 1)
elif version == '*' or not version:
version = 'HEAD'
if collection.startswith('git+'):
path = collection[4:]
else:
path = collection
path, fragment = urldefrag(path)
fragment = fragment.strip(os.path.sep)
if path.endswith(os.path.sep + '.git'):
name = path.split(os.path.sep)[-2]
elif '://' not in path and '@' not in path:
name = path
else:
name = path.split('/')[-1]
if name.endswith('.git'):
name = name[:-4]
return name, version, path, fragment
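# Illustrative worked example (not part of the original module); the repo URL
# is made up and the fragment handling assumes a POSIX path separator:
def _demo_parse_scm():
    name, version, path, fragment = parse_scm(
        'git+https://github.com/org/repo.git#collections/my_ns,1.2.3', '*',
    )
    # The comma suffix wins over the '*' default, 'git+' is stripped, the
    # '#...' fragment is separated, and the name comes from the last URL
    # segment minus '.git'.
    assert (name, version, path, fragment) == (
        'repo', '1.2.3', 'https://github.com/org/repo.git', 'collections/my_ns',
    )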
def _extract_collection_from_git(repo_url, coll_ver, b_path):
name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
b_checkout_path = mkdtemp(
dir=b_path,
prefix=to_bytes(name, errors='surrogate_or_strict'),
)
try:
git_executable = get_bin_path('git')
except ValueError as err:
raise AnsibleError(
"Could not find git executable to extract the collection from the Git repository `{repo_url!s}`.".
format(repo_url=to_native(git_url))
) from err
# Perform a shallow clone if simply cloning HEAD
if version == 'HEAD':
git_clone_cmd = [git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)]
else:
git_clone_cmd = [git_executable, 'clone', git_url, to_text(b_checkout_path)]
# FIXME: '--branch', version
if context.CLIARGS['ignore_certs'] or C.GALAXY_IGNORE_CERTS:
git_clone_cmd.extend(['-c', 'http.sslVerify=false'])
try:
subprocess.check_call(git_clone_cmd)
except subprocess.CalledProcessError as proc_err:
raise AnsibleError( # should probably be LookupError
'Failed to clone a Git repository from `{repo_url!s}`.'.
format(repo_url=to_native(git_url)),
) from proc_err
git_switch_cmd = git_executable, 'checkout', to_text(version)
try:
subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
except subprocess.CalledProcessError as proc_err:
raise AnsibleError( # should probably be LookupError
'Failed to switch a cloned Git repo `{repo_url!s}` '
'to the requested revision `{commitish!s}`.'.
format(
commitish=to_native(version),
repo_url=to_native(git_url),
),
) from proc_err
return (
os.path.join(b_checkout_path, to_bytes(fragment))
if fragment else b_checkout_path
)
# FIXME: use random subdirs while preserving the file names
@retry_with_delays_and_condition(
backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40),
should_retry_error=should_retry_error
)
def _download_file(url, b_path, expected_hash, validate_certs, token=None, timeout=60):
# type: (str, bytes, t.Optional[str], bool, GalaxyToken, int) -> bytes
# ^ NOTE: used in download and verify_collections ^
b_tarball_name = to_bytes(
url.rsplit('/', 1)[1], errors='surrogate_or_strict',
)
b_file_name = b_tarball_name[:-len('.tar.gz')]
b_tarball_dir = mkdtemp(
dir=b_path,
prefix=b'-'.join((b_file_name, b'')),
) # type: bytes
b_file_path = os.path.join(b_tarball_dir, b_tarball_name)
display.display("Downloading %s to %s" % (url, to_text(b_tarball_dir)))
# NOTE: Galaxy redirects downloads to S3 which rejects the request
# NOTE: if an Authorization header is attached so don't redirect it
try:
resp = open_url(
to_native(url, errors='surrogate_or_strict'),
validate_certs=validate_certs,
headers=None if token is None else token.headers(),
unredirected_headers=['Authorization'], http_agent=user_agent(),
timeout=timeout
)
except Exception as err:
raise AnsibleError(to_native(err), orig_exc=err)
with open(b_file_path, 'wb') as download_file: # type: t.BinaryIO
actual_hash = _consume_file(resp, write_to=download_file)
if expected_hash:
display.vvvv(
'Validating downloaded file hash {actual_hash!s} with '
'expected hash {expected_hash!s}'.
format(actual_hash=actual_hash, expected_hash=expected_hash)
)
if expected_hash != actual_hash:
raise AnsibleError('Downloaded file hash does not match the expected artifact hash')
return b_file_path
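# Illustrative sketch (not part of the original module): the same retry
# machinery applied to a hypothetical flaky fetch. As with _download_file
# above, the jittered backoff iterator is built once at decoration time and
# bounds the number of retry attempts; should_retry_error decides which
# exceptions qualify for a retry.
@retry_with_delays_and_condition(
    backoff_iterator=generate_jittered_backoff(retries=3, delay_base=1, delay_threshold=4),
    should_retry_error=lambda error: isinstance(error, URLError),
)
def _demo_flaky_fetch(url):
    # Any URLError raised here triggers a delayed retry until the backoff
    # iterator is exhausted; other exceptions propagate immediately.
    return open_url(url)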
def _consume_file(read_from, write_to=None):
# type: (t.BinaryIO, t.BinaryIO) -> str
bufsize = 65536
sha256_digest = sha256()
data = read_from.read(bufsize)
while data:
if write_to is not None:
write_to.write(data)
write_to.flush()
sha256_digest.update(data)
data = read_from.read(bufsize)
return sha256_digest.hexdigest()
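# NOTE: Usage sketch (names are illustrative only):
# NOTE:     with open(b_src, 'rb') as src, open(b_dst, 'wb') as dst:
# NOTE:         digest = _consume_file(src, write_to=dst)
# NOTE: reads `src` in 64 KiB chunks, optionally tees them to `write_to`, and
# NOTE: returns the hex SHA-256 of everything read.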
def _normalize_galaxy_yml_manifest(
galaxy_yml, # type: dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
b_galaxy_yml_path, # type: bytes
require_build_metadata=True, # type: bool
):
# type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
galaxy_yml_schema = (
get_collections_galaxy_meta_info()
) # type: list[dict[str, t.Any]] # FIXME: <--
# FIXME: 👆maybe precise type: list[dict[str, t.Union[bool, str, list[str]]]]
mandatory_keys = set()
string_keys = set() # type: set[str]
list_keys = set() # type: set[str]
dict_keys = set() # type: set[str]
sentinel_keys = set() # type: set[str]
for info in galaxy_yml_schema:
if info.get('required', False):
mandatory_keys.add(info['key'])
key_list_type = {
'str': string_keys,
'list': list_keys,
'dict': dict_keys,
'sentinel': sentinel_keys,
}[info.get('type', 'str')]
key_list_type.add(info['key'])
all_keys = frozenset(mandatory_keys | string_keys | list_keys | dict_keys | sentinel_keys)
set_keys = set(galaxy_yml.keys())
missing_keys = mandatory_keys.difference(set_keys)
if missing_keys:
msg = (
"The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
% (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys)))
)
if require_build_metadata:
raise AnsibleError(msg)
display.warning(msg)
raise ValueError(msg)
extra_keys = set_keys.difference(all_keys)
if len(extra_keys) > 0:
display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
% (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
# Add the defaults if they have not been set
for optional_string in string_keys:
if optional_string not in galaxy_yml:
galaxy_yml[optional_string] = None
for optional_list in list_keys:
list_val = galaxy_yml.get(optional_list, None)
if list_val is None:
galaxy_yml[optional_list] = []
elif not isinstance(list_val, list):
galaxy_yml[optional_list] = [list_val] # type: ignore[list-item]
for optional_dict in dict_keys:
if optional_dict not in galaxy_yml:
galaxy_yml[optional_dict] = {}
for optional_sentinel in sentinel_keys:
if optional_sentinel not in galaxy_yml:
galaxy_yml[optional_sentinel] = Sentinel
# NOTE: `version: null` is only allowed for `galaxy.yml`
# NOTE: and not `MANIFEST.json`. The use-case for it is collections
# NOTE: that generate the version from Git before building a
# NOTE: distributable tarball artifact.
if not galaxy_yml.get('version'):
galaxy_yml['version'] = '*'
return galaxy_yml
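# NOTE: For example (hypothetical input), a galaxy.yml carrying only the
# NOTE: mandatory keys comes back with every optional string key set to None,
# NOTE: list keys to [], dict keys to {}, sentinel keys to Sentinel, and a
# NOTE: falsy `version` (e.g. `version: null`) replaced by the '*' wildcard.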
def _get_meta_from_dir(
b_path, # type: bytes
require_build_metadata=True, # type: bool
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
try:
return _get_meta_from_installed_dir(b_path)
except LookupError:
return _get_meta_from_src_dir(b_path, require_build_metadata)
def _get_meta_from_src_dir(
b_path, # type: bytes
require_build_metadata=True, # type: bool
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
galaxy_yml = os.path.join(b_path, _GALAXY_YAML)
if not os.path.isfile(galaxy_yml):
raise LookupError(
"The collection galaxy.yml path '{path!s}' does not exist.".
format(path=to_native(galaxy_yml))
)
with open(galaxy_yml, 'rb') as manifest_file_obj:
try:
manifest = yaml_load(manifest_file_obj)
except yaml.error.YAMLError as yaml_err:
raise AnsibleError(
"Failed to parse the galaxy.yml at '{path!s}' with "
'the following error:\n{err_txt!s}'.
format(
path=to_native(galaxy_yml),
err_txt=to_native(yaml_err),
),
) from yaml_err
if not isinstance(manifest, dict):
if require_build_metadata:
raise AnsibleError(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.")
# Valid build metadata is not required by ansible-galaxy list. Raise ValueError to fall back to implicit metadata.
display.warning(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.")
raise ValueError(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.")
return _normalize_galaxy_yml_manifest(manifest, galaxy_yml, require_build_metadata)
def _get_json_from_installed_dir(
b_path, # type: bytes
filename, # type: str
): # type: (...) -> dict
b_json_filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))
try:
with open(b_json_filepath, 'rb') as manifest_fd:
b_json_text = manifest_fd.read()
except (IOError, OSError):
raise LookupError(
"The collection {manifest!s} path '{path!s}' does not exist.".
format(
manifest=filename,
path=to_native(b_json_filepath),
)
)
manifest_txt = to_text(b_json_text, errors='surrogate_or_strict')
try:
manifest = json.loads(manifest_txt)
except ValueError:
raise AnsibleError(
'Collection tar file member {member!s} does not '
'contain a valid json string.'.
format(member=filename),
)
return manifest
def _get_meta_from_installed_dir(
b_path, # type: bytes
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
manifest = _get_json_from_installed_dir(b_path, MANIFEST_FILENAME)
collection_info = manifest['collection_info']
version = collection_info.get('version')
if not version:
raise AnsibleError(
u'Collection metadata file `{manifest_filename!s}` at `{meta_file!s}` is expected '
u'to have a valid SemVer version value but got {version!s}'.
format(
manifest_filename=MANIFEST_FILENAME,
meta_file=to_text(b_path),
version=to_text(repr(version)),
),
)
return collection_info
def _get_meta_from_tar(
b_path, # type: bytes
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
if not os.path.exists(b_path):
raise AnsibleError(
f"Unable to find collection artifact file at '{to_native(b_path)}'."
)
if not tarfile.is_tarfile(b_path):
raise AnsibleError(
"Collection artifact at '{path!s}' is not a valid tar file.".
format(path=to_native(b_path)),
)
with tarfile.open(b_path, mode='r') as collection_tar: # type: tarfile.TarFile
try:
member = collection_tar.getmember(MANIFEST_FILENAME)
except KeyError:
raise AnsibleError(
"Collection at '{path!s}' does not contain the "
'required file {manifest_file!s}.'.
format(
path=to_native(b_path),
manifest_file=MANIFEST_FILENAME,
),
)
with _tarfile_extract(collection_tar, member) as (_member, member_obj):
if member_obj is None:
raise AnsibleError(
'Collection tar file does not contain '
'member {member!s}'.format(member=MANIFEST_FILENAME),
)
text_content = to_text(
member_obj.read(),
errors='surrogate_or_strict',
)
try:
manifest = json.loads(text_content)
except ValueError:
raise AnsibleError(
'Collection tar file member {member!s} does not '
'contain a valid json string.'.
format(member=MANIFEST_FILENAME),
)
return manifest['collection_info']
@contextmanager
def _tarfile_extract(
tar, # type: tarfile.TarFile
member, # type: tarfile.TarInfo
):
# type: (...) -> t.Iterator[tuple[tarfile.TarInfo, t.Optional[t.IO[bytes]]]]
tar_obj = tar.extractfile(member)
try:
yield member, tar_obj
finally:
if tar_obj is not None:
tar_obj.close()
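# NOTE: Usage sketch (mirrors the call sites above; names are illustrative):
# NOTE:     with tarfile.open(b_path, mode='r') as tar:
# NOTE:         member = tar.getmember(MANIFEST_FILENAME)
# NOTE:         with _tarfile_extract(tar, member) as (info, fileobj):
# NOTE:             data = fileobj.read() if fileobj is not None else b''
# NOTE: The context manager guarantees the extracted file object is closed.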
| 29,490 | Python | .py | 655 | 35.10687 | 152 | 0.612958 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,072 | __init__.py | ansible_ansible/lib/ansible/galaxy/collection/__init__.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2019-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Installed collections management package."""
from __future__ import annotations
import errno
import fnmatch
import functools
import glob
import inspect
import json
import os
import pathlib
import queue
import re
import shutil
import stat
import sys
import tarfile
import tempfile
import textwrap
import threading
import time
import typing as t
from collections import namedtuple
from contextlib import contextmanager
from dataclasses import dataclass
from hashlib import sha256
from io import BytesIO
from importlib.metadata import distribution
from itertools import chain
try:
from packaging.requirements import Requirement as PkgReq
except ImportError:
class PkgReq: # type: ignore[no-redef]
pass
HAS_PACKAGING = False
else:
HAS_PACKAGING = True
try:
from distlib.manifest import Manifest # type: ignore[import]
from distlib import DistlibException # type: ignore[import]
except ImportError:
HAS_DISTLIB = False
else:
HAS_DISTLIB = True
if t.TYPE_CHECKING:
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
ManifestKeysType = t.Literal[
'collection_info', 'file_manifest_file', 'format',
]
FileMetaKeysType = t.Literal[
'name',
'ftype',
'chksum_type',
'chksum_sha256',
'format',
]
CollectionInfoKeysType = t.Literal[
# collection meta:
'namespace', 'name', 'version',
'authors', 'readme',
'tags', 'description',
'license', 'license_file',
'dependencies',
'repository', 'documentation',
'homepage', 'issues',
# files meta:
FileMetaKeysType,
]
ManifestValueType = t.Dict[CollectionInfoKeysType, t.Union[int, str, t.List[str], t.Dict[str, str], None]]
CollectionManifestType = t.Dict[ManifestKeysType, ManifestValueType]
FileManifestEntryType = t.Dict[FileMetaKeysType, t.Union[str, int, None]]
FilesManifestType = t.Dict[t.Literal['files', 'format'], t.Union[t.List[FileManifestEntryType], int]]
import ansible.constants as C
from ansible.compat.importlib_resources import files
from ansible.errors import AnsibleError
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection.concrete_artifact_manager import (
_consume_file,
_download_file,
_get_json_from_installed_dir,
_get_meta_from_src_dir,
_tarfile_extract,
)
from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
from ansible.galaxy.collection.gpg import (
run_gpg_verify,
parse_gpg_errors,
get_signature_from_source,
GPG_ERROR_MAP,
)
try:
from ansible.galaxy.dependency_resolution import (
build_collection_dependency_resolver,
)
from ansible.galaxy.dependency_resolution.errors import (
CollectionDependencyResolutionImpossible,
CollectionDependencyInconsistentCandidate,
)
from ansible.galaxy.dependency_resolution.providers import (
RESOLVELIB_VERSION,
RESOLVELIB_LOWERBOUND,
RESOLVELIB_UPPERBOUND,
)
except ImportError:
HAS_RESOLVELIB = False
else:
HAS_RESOLVELIB = True
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate, Requirement, _is_installed_collection_dir,
)
from ansible.galaxy.dependency_resolution.versioning import meets_requirements
from ansible.plugins.loader import get_all_plugin_loaders
from ansible.module_utils.common.file import S_IRWU_RG_RO, S_IRWXU_RXG_RXO, S_IXANY
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.yaml import yaml_dump
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash, secure_hash_s
display = Display()
MANIFEST_FORMAT = 1
MANIFEST_FILENAME = 'MANIFEST.json'
ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
SIGNATURE_COUNT_RE = r"^(?P<strict>\+)?(?:(?P<count>\d+)|(?P<all>all))$"
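# NOTE: Illustration (assumed inputs): the pattern accepts '2', 'all', '+2' or
# NOTE: '+all'; e.g. re.match(SIGNATURE_COUNT_RE, '+2').groupdict() yields
# NOTE: {'strict': '+', 'count': '2', 'all': None}, where the leading '+'
# NOTE: marks the requirement as strict.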
@dataclass
class ManifestControl:
directives: list[str] = None
omit_default_directives: bool = False
def __post_init__(self):
# Allow a dict representing this dataclass to be splatted directly.
# Requires attrs to have a default value, so anything with a default
# of None is swapped for an instance of its (potentially mutable) annotated type.
for field_name, field_type in inspect.get_annotations(type(self), eval_str=True).items():
if getattr(self, field_name) is None:
super().__setattr__(field_name, field_type())
class CollectionSignatureError(Exception):
def __init__(self, reasons=None, stdout=None, rc=None, ignore=False):
self.reasons = reasons
self.stdout = stdout
self.rc = rc
self.ignore = ignore
self._reason_wrapper = None
def _report_unexpected(self, collection_name):
return (
f"Unexpected error for '{collection_name}': "
f"GnuPG signature verification failed with the return code {self.rc} and output {self.stdout}"
)
def _report_expected(self, collection_name):
header = f"Signature verification failed for '{collection_name}' (return code {self.rc}):"
return header + self._format_reasons()
def _format_reasons(self):
if self._reason_wrapper is None:
self._reason_wrapper = textwrap.TextWrapper(
initial_indent=" * ", # 6 chars
subsequent_indent=" ", # 6 chars
)
wrapped_reasons = [
'\n'.join(self._reason_wrapper.wrap(reason))
for reason in self.reasons
]
return '\n' + '\n'.join(wrapped_reasons)
def report(self, collection_name):
if self.reasons:
return self._report_expected(collection_name)
return self._report_unexpected(collection_name)
# FUTURE: expose actual verify result details for a collection on this object, maybe reimplement as dataclass on py3.8+
class CollectionVerifyResult:
def __init__(self, collection_name): # type: (str) -> None
self.collection_name = collection_name # type: str
self.success = True # type: bool
def verify_local_collection(local_collection, remote_collection, artifacts_manager):
# type: (Candidate, t.Optional[Candidate], ConcreteArtifactsManager) -> CollectionVerifyResult
"""Verify integrity of the locally installed collection.
:param local_collection: Collection being checked.
:param remote_collection: Upstream collection (optional, if None, only verify local artifact)
:param artifacts_manager: Artifacts manager.
:return: a collection verify result object.
"""
result = CollectionVerifyResult(local_collection.fqcn)
b_collection_path = to_bytes(local_collection.src, errors='surrogate_or_strict')
display.display("Verifying '{coll!s}'.".format(coll=local_collection))
display.display(
u"Installed collection found at '{path!s}'".
format(path=to_text(local_collection.src)),
)
modified_content = [] # type: list[ModifiedContent]
verify_local_only = remote_collection is None
# partial away the local FS detail so we can just ask generically during validation
get_json_from_validation_source = functools.partial(_get_json_from_installed_dir, b_collection_path)
get_hash_from_validation_source = functools.partial(_get_file_hash, b_collection_path)
if not verify_local_only:
# Compare installed version versus requirement version
if local_collection.ver != remote_collection.ver:
err = (
"{local_fqcn!s} has the version '{local_ver!s}' but "
"is being compared to '{remote_ver!s}'".format(
local_fqcn=local_collection.fqcn,
local_ver=local_collection.ver,
remote_ver=remote_collection.ver,
)
)
display.display(err)
result.success = False
return result
manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME)
signatures = list(local_collection.signatures)
if verify_local_only and local_collection.source_info is not None:
signatures = [info["signature"] for info in local_collection.source_info["signatures"]] + signatures
elif not verify_local_only and remote_collection.signatures:
signatures = list(remote_collection.signatures) + signatures
keyring_configured = artifacts_manager.keyring is not None
if not keyring_configured and signatures:
display.warning(
"The GnuPG keyring used for collection signature "
"verification was not configured but signatures were "
"provided by the Galaxy server. "
"Configure a keyring for ansible-galaxy to verify "
"the origin of the collection. "
"Skipping signature verification."
)
elif keyring_configured:
if not verify_file_signatures(
local_collection.fqcn,
manifest_file,
signatures,
artifacts_manager.keyring,
artifacts_manager.required_successful_signature_count,
artifacts_manager.ignore_signature_errors,
):
result.success = False
return result
display.vvvv(f"GnuPG signature verification succeeded, verifying contents of {local_collection}")
if verify_local_only:
# since we're not downloading this, just seed it with the value from disk
manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
elif keyring_configured and remote_collection.signatures:
manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
else:
# fetch remote
# NOTE: AnsibleError is raised on URLError
b_temp_tar_path = artifacts_manager.get_artifact_path_from_unknown(remote_collection)
display.vvv(
u"Remote collection cached as '{path!s}'".format(path=to_text(b_temp_tar_path))
)
# partial away the tarball details so we can just ask generically during validation
get_json_from_validation_source = functools.partial(_get_json_from_tar_file, b_temp_tar_path)
get_hash_from_validation_source = functools.partial(_get_tar_file_hash, b_temp_tar_path)
# Verify the downloaded manifest hash matches the installed copy before verifying the file manifest
manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
_verify_file_hash(b_collection_path, MANIFEST_FILENAME, manifest_hash, modified_content)
display.display('MANIFEST.json hash: {manifest_hash}'.format(manifest_hash=manifest_hash))
manifest = get_json_from_validation_source(MANIFEST_FILENAME)
# Use the manifest to verify the file manifest checksum
file_manifest_data = manifest['file_manifest_file']
file_manifest_filename = file_manifest_data['name']
expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]
# Verify the file manifest before using it to verify individual files
_verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
file_manifest = get_json_from_validation_source(file_manifest_filename)
collection_dirs = set()
collection_files = {
os.path.join(b_collection_path, b'MANIFEST.json'),
os.path.join(b_collection_path, b'FILES.json'),
}
# Use the file manifest to verify individual file checksums
for manifest_data in file_manifest['files']:
name = manifest_data['name']
if manifest_data['ftype'] == 'file':
collection_files.add(
os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict'))
)
expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
_verify_file_hash(b_collection_path, name, expected_hash, modified_content)
if manifest_data['ftype'] == 'dir':
collection_dirs.add(
os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict'))
)
b_ignore_patterns = [
b'*.pyc',
]
# Find any paths not in the FILES.json
for root, dirs, files in os.walk(b_collection_path):
for name in files:
full_path = os.path.join(root, name)
path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict')
if any(fnmatch.fnmatch(full_path, b_pattern) for b_pattern in b_ignore_patterns):
display.v("Ignoring verification for %s" % full_path)
continue
if full_path not in collection_files:
modified_content.append(
ModifiedContent(filename=path, expected='the file does not exist', installed='the file exists')
)
for name in dirs:
full_path = os.path.join(root, name)
path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict')
if full_path not in collection_dirs:
modified_content.append(
ModifiedContent(filename=path, expected='the directory does not exist', installed='the directory exists')
)
if modified_content:
result.success = False
display.display(
'Collection {fqcn!s} contains modified content '
'in the following files:'.
format(fqcn=to_text(local_collection.fqcn)),
)
for content_change in modified_content:
display.display(' %s' % content_change.filename)
display.v(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed))
else:
what = "are internally consistent with its manifest" if verify_local_only else "match the remote collection"
display.display(
"Successfully verified that checksums for '{coll!s}' {what!s}.".
format(coll=local_collection, what=what),
)
return result
def verify_file_signatures(fqcn, manifest_file, detached_signatures, keyring, required_successful_count, ignore_signature_errors):
# type: (str, str, list[str], str, str, list[str]) -> bool
successful = 0
error_messages = []
signature_count_requirements = re.match(SIGNATURE_COUNT_RE, required_successful_count).groupdict()
strict = signature_count_requirements['strict'] or False
require_all = signature_count_requirements['all']
require_count = signature_count_requirements['count']
if require_count is not None:
require_count = int(require_count)
for signature in detached_signatures:
signature = to_text(signature, errors='surrogate_or_strict')
try:
verify_file_signature(manifest_file, signature, keyring, ignore_signature_errors)
except CollectionSignatureError as error:
if error.ignore:
# Do not include ignored errors in either the failed or successful count
continue
error_messages.append(error.report(fqcn))
else:
successful += 1
if require_all:
continue
if successful == require_count:
break
if strict and not successful:
verified = False
display.display(f"Signature verification failed for '{fqcn}': no successful signatures")
elif require_all:
verified = not error_messages
if not verified:
display.display(f"Signature verification failed for '{fqcn}': some signatures failed")
else:
verified = not detached_signatures or require_count == successful
if not verified:
display.display(f"Signature verification failed for '{fqcn}': fewer successful signatures than required")
if not verified:
for msg in error_messages:
display.vvvv(msg)
return verified
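# NOTE: Per SIGNATURE_COUNT_RE above: a bare integer requires at least that
# NOTE: many successful signatures, 'all' requires every non-ignored signature
# NOTE: to verify, and a leading '+' additionally fails verification when no
# NOTE: signature succeeds at all.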
def verify_file_signature(manifest_file, detached_signature, keyring, ignore_signature_errors):
# type: (str, str, str, list[str]) -> None
"""Run the gpg command and parse any errors. Raises CollectionSignatureError on failure."""
gpg_result, gpg_verification_rc = run_gpg_verify(manifest_file, detached_signature, keyring, display)
if gpg_result:
errors = parse_gpg_errors(gpg_result)
try:
error = next(errors)
except StopIteration:
pass
else:
reasons = []
ignored_reasons = 0
for error in chain([error], errors):
# Get error status (dict key) from the class (dict value)
status_code = list(GPG_ERROR_MAP.keys())[list(GPG_ERROR_MAP.values()).index(error.__class__)]
if status_code in ignore_signature_errors:
ignored_reasons += 1
reasons.append(error.get_gpg_error_description())
ignore = len(reasons) == ignored_reasons
raise CollectionSignatureError(reasons=set(reasons), stdout=gpg_result, rc=gpg_verification_rc, ignore=ignore)
if gpg_verification_rc:
raise CollectionSignatureError(stdout=gpg_result, rc=gpg_verification_rc)
# No errors and rc is 0, verify was successful
return None
def build_collection(u_collection_path, u_output_path, force):
# type: (str, str, bool) -> str
"""Creates the Ansible collection artifact in a .tar.gz file.
:param u_collection_path: The path to the collection to build. This should be the directory that contains the
galaxy.yml file.
:param u_output_path: The path to create the collection build artifact. This should be a directory.
:param force: Whether to overwrite an existing collection build artifact or fail.
:return: The path to the collection build artifact.
"""
b_collection_path = to_bytes(u_collection_path, errors='surrogate_or_strict')
try:
collection_meta = _get_meta_from_src_dir(b_collection_path)
except LookupError as lookup_err:
raise AnsibleError(to_native(lookup_err)) from lookup_err
collection_manifest = _build_manifest(**collection_meta)
file_manifest = _build_files_manifest(
b_collection_path,
collection_meta['namespace'], # type: ignore[arg-type]
collection_meta['name'], # type: ignore[arg-type]
collection_meta['build_ignore'], # type: ignore[arg-type]
collection_meta['manifest'], # type: ignore[arg-type]
collection_meta['license_file'], # type: ignore[arg-type]
)
artifact_tarball_file_name = '{ns!s}-{name!s}-{ver!s}.tar.gz'.format(
name=collection_meta['name'],
ns=collection_meta['namespace'],
ver=collection_meta['version'],
)
b_collection_output = os.path.join(
to_bytes(u_output_path),
to_bytes(artifact_tarball_file_name, errors='surrogate_or_strict'),
)
if os.path.exists(b_collection_output):
if os.path.isdir(b_collection_output):
raise AnsibleError("The output collection artifact '%s' already exists, "
"but is a directory - aborting" % to_native(b_collection_output))
elif not force:
raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
"the collection artifact." % to_native(b_collection_output))
collection_output = _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
return collection_output
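# NOTE: The artifact lands at <u_output_path>/<namespace>-<name>-<version>.tar.gz;
# NOTE: an existing file is only overwritten with force=True, while an existing
# NOTE: directory of that name always aborts the build.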
def download_collections(
collections, # type: t.Iterable[Requirement]
output_path, # type: str
apis, # type: t.Iterable[GalaxyAPI]
no_deps, # type: bool
allow_pre_release, # type: bool
artifacts_manager, # type: ConcreteArtifactsManager
): # type: (...) -> None
"""Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
file of the downloaded requirements to be used for an install.
:param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server).
:param output_path: The path to download the collections to.
:param apis: A list of GalaxyAPIs to query when search for a collection.
:param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host.
:param no_deps: Ignore any collection dependencies and only download the base requirements.
:param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
"""
with _display_progress("Process download dependency map"):
dep_map = _resolve_depenency_map(
set(collections),
galaxy_apis=apis,
preferred_candidates=None,
concrete_artifacts_manager=artifacts_manager,
no_deps=no_deps,
allow_pre_release=allow_pre_release,
upgrade=False,
# Avoid overhead getting signatures since they are not currently applicable to downloaded collections
include_signatures=False,
offline=False,
)
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
requirements = []
with _display_progress(
"Starting collection download process to '{path!s}'".
format(path=output_path),
):
for fqcn, concrete_coll_pin in dep_map.copy().items(): # FIXME: move into the provider
if concrete_coll_pin.is_virtual:
display.display(
'{coll!s} is not downloadable'.
format(coll=to_text(concrete_coll_pin)),
)
continue
display.display(
u"Downloading collection '{coll!s}' to '{path!s}'".
format(coll=to_text(concrete_coll_pin), path=to_text(b_output_path)),
)
b_src_path = artifacts_manager.get_artifact_path_from_unknown(concrete_coll_pin)
b_dest_path = os.path.join(
b_output_path,
os.path.basename(b_src_path),
)
if concrete_coll_pin.is_dir:
b_dest_path = to_bytes(
build_collection(
to_text(b_src_path, errors='surrogate_or_strict'),
to_text(output_path, errors='surrogate_or_strict'),
force=True,
),
errors='surrogate_or_strict',
)
else:
shutil.copy(to_native(b_src_path), to_native(b_dest_path))
display.display(
"Collection '{coll!s}' was downloaded successfully".
format(coll=concrete_coll_pin),
)
requirements.append({
# FIXME: Consider using a more specific upgraded format
# FIXME: having FQCN in the name field, with src field
# FIXME: pointing to the file path, and explicitly set
# FIXME: type. If version and name are set, it'd
# FIXME: perform validation against the actual metadata
# FIXME: in the artifact src points at.
'name': to_native(os.path.basename(b_dest_path)),
'version': concrete_coll_pin.ver,
})
requirements_path = os.path.join(output_path, 'requirements.yml')
b_requirements_path = to_bytes(
requirements_path, errors='surrogate_or_strict',
)
display.display(
u'Writing requirements.yml file of downloaded collections '
"to '{path!s}'".format(path=to_text(requirements_path)),
)
yaml_bytes = to_bytes(
yaml_dump({'collections': requirements}),
errors='surrogate_or_strict',
)
with open(b_requirements_path, mode='wb') as req_fd:
req_fd.write(yaml_bytes)
def publish_collection(collection_path, api, wait, timeout):
"""Publish an Ansible collection tarball into an Ansible Galaxy server.
:param collection_path: The path to the collection tarball to publish.
:param api: A GalaxyAPI to publish the collection to.
:param wait: Whether to wait until the import process is complete.
:param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
"""
import_uri = api.publish_collection(collection_path)
if wait:
# Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
# always the task_id, though.
# v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
# v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
task_id = None
for path_segment in reversed(import_uri.split('/')):
if path_segment:
task_id = path_segment
break
if not task_id:
raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
with _display_progress(
"Collection has been published to the Galaxy server "
"{api.name!s} {api.api_server!s}".format(api=api),
):
api.wait_import_task(task_id, timeout)
display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
% (api.name, api.api_server))
else:
display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
"completed due to --no-wait being set. Import task results can be found at %s"
% (api.name, api.api_server, import_uri))
def install_collections(
collections, # type: t.Iterable[Requirement]
output_path, # type: str
apis, # type: t.Iterable[GalaxyAPI]
ignore_errors, # type: bool
no_deps, # type: bool
force, # type: bool
force_deps, # type: bool
upgrade, # type: bool
allow_pre_release, # type: bool
artifacts_manager, # type: ConcreteArtifactsManager
disable_gpg_verify, # type: bool
offline, # type: bool
read_requirement_paths, # type: set[str]
): # type: (...) -> None
"""Install Ansible collections to the path specified.
:param collections: The collections to install.
:param output_path: The path to install the collections to.
:param apis: A list of GalaxyAPIs to query when searching for a collection.
:param validate_certs: Whether to validate the certificates if downloading a tarball.
:param ignore_errors: Whether to ignore any errors when installing the collection.
:param no_deps: Ignore any collection dependencies and only install the base requirements.
:param force: Re-install a collection if it has already been installed.
:param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
"""
existing_collections = {
Requirement(coll.fqcn, coll.ver, coll.src, coll.type, None)
for path in {output_path} | read_requirement_paths
for coll in find_existing_collections(path, artifacts_manager)
}
unsatisfied_requirements = set(
chain.from_iterable(
(
Requirement.from_dir_path(to_bytes(sub_coll), artifacts_manager)
for sub_coll in (
artifacts_manager.
get_direct_collection_dependencies(install_req).
keys()
)
)
if install_req.is_subdirs else (install_req, )
for install_req in collections
),
)
requested_requirements_names = {req.fqcn for req in unsatisfied_requirements}
# NOTE: Don't attempt to reevaluate already installed deps
# NOTE: unless `--force` or `--force-with-deps` is passed
unsatisfied_requirements -= set() if force or force_deps else {
req
for req in unsatisfied_requirements
for exs in existing_collections
if req.fqcn == exs.fqcn and meets_requirements(exs.ver, req.ver)
}
if not unsatisfied_requirements and not upgrade:
display.display(
'Nothing to do. All requested collections are already '
'installed. If you want to reinstall them, '
'consider using `--force`.'
)
return
# FIXME: This probably needs to be improved to
# FIXME: properly match differing src/type.
existing_non_requested_collections = {
coll for coll in existing_collections
if coll.fqcn not in requested_requirements_names
}
preferred_requirements = (
[] if force_deps
else existing_non_requested_collections if force
else existing_collections
)
preferred_collections = {
# NOTE: No need to include signatures if the collection is already installed
Candidate(coll.fqcn, coll.ver, coll.src, coll.type, None)
for coll in preferred_requirements
}
with _display_progress("Process install dependency map"):
dependency_map = _resolve_depenency_map(
collections,
galaxy_apis=apis,
preferred_candidates=preferred_collections,
concrete_artifacts_manager=artifacts_manager,
no_deps=no_deps,
allow_pre_release=allow_pre_release,
upgrade=upgrade,
include_signatures=not disable_gpg_verify,
offline=offline,
)
keyring_exists = artifacts_manager.keyring is not None
with _display_progress("Starting collection install process"):
for fqcn, concrete_coll_pin in dependency_map.items():
if concrete_coll_pin.is_virtual:
display.vvvv(
"Encountered {coll!s}, skipping.".
format(coll=to_text(concrete_coll_pin)),
)
continue
if concrete_coll_pin in preferred_collections:
display.display(
"'{coll!s}' is already installed, skipping.".
format(coll=to_text(concrete_coll_pin)),
)
continue
if not disable_gpg_verify and concrete_coll_pin.signatures and not keyring_exists:
# Duplicate warning msgs are not displayed
display.warning(
"The GnuPG keyring used for collection signature "
"verification was not configured but signatures were "
"provided by the Galaxy server to verify authenticity. "
"Configure a keyring for ansible-galaxy to use "
"or disable signature verification. "
"Skipping signature verification."
)
if concrete_coll_pin.type == 'galaxy':
concrete_coll_pin = concrete_coll_pin.with_signatures_repopulated()
try:
install(concrete_coll_pin, output_path, artifacts_manager)
except AnsibleError as err:
if ignore_errors:
display.warning(
'Failed to install collection {coll!s} but skipping '
'due to --ignore-errors being set. Error: {error!s}'.
format(
coll=to_text(concrete_coll_pin),
error=to_text(err),
)
)
else:
raise
# NOTE: imported in ansible.cli.galaxy
def validate_collection_name(name): # type: (str) -> str
"""Validates the collection name as an input from the user or a requirements file fit the requirements.
:param name: The input name with optional range specifier split by ':'.
:return: The input value, required for argparse validation.
"""
collection, dummy, dummy = name.partition(':')
if AnsibleCollectionRef.is_valid_collection_name(collection):
return name
raise AnsibleError("Invalid collection name '%s', "
"name must be in the format <namespace>.<collection>. \n"
"Please make sure namespace and collection name contains "
"characters from [a-zA-Z0-9_] only." % name)
# NOTE: imported in ansible.cli.galaxy
def validate_collection_path(collection_path): # type: (str) -> str
"""Ensure a given path ends with 'ansible_collections'
:param collection_path: The path that should end in 'ansible_collections'
:return: collection_path ending in 'ansible_collections' if it does not already.
"""
if os.path.split(collection_path)[1] != 'ansible_collections':
return os.path.join(collection_path, 'ansible_collections')
return collection_path
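# NOTE: For example (illustrative path), '/usr/share/ansible/collections'
# NOTE: becomes '/usr/share/ansible/collections/ansible_collections', while a
# NOTE: path already ending in 'ansible_collections' is returned unchanged.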
def verify_collections(
collections, # type: t.Iterable[Requirement]
search_paths, # type: t.Iterable[str]
apis, # type: t.Iterable[GalaxyAPI]
ignore_errors, # type: bool
local_verify_only, # type: bool
artifacts_manager, # type: ConcreteArtifactsManager
): # type: (...) -> list[CollectionVerifyResult]
r"""Verify the integrity of locally installed collections.
:param collections: The collections to check.
:param search_paths: Locations for the local collection lookup.
:param apis: A list of GalaxyAPIs to query when searching for a collection.
:param ignore_errors: Whether to ignore any errors when verifying the collection.
:param local_verify_only: When True, skip downloads and only verify local manifests.
:param artifacts_manager: Artifacts manager.
:return: list of CollectionVerifyResult objects describing the results of each collection verification
"""
results = [] # type: list[CollectionVerifyResult]
api_proxy = MultiGalaxyAPIProxy(apis, artifacts_manager)
with _display_progress():
for collection in collections:
try:
if collection.is_concrete_artifact:
raise AnsibleError(
message="'{coll_type!s}' type is not supported. "
'The format namespace.name is expected.'.
format(coll_type=collection.type)
)
# NOTE: Verify local collection exists before
# NOTE: downloading its source artifact from
# NOTE: a galaxy server.
default_err = 'Collection %s is not installed in any of the collection paths.' % collection.fqcn
for search_path in search_paths:
b_search_path = to_bytes(
os.path.join(
search_path,
collection.namespace, collection.name,
),
errors='surrogate_or_strict',
)
if not os.path.isdir(b_search_path):
continue
if not _is_installed_collection_dir(b_search_path):
default_err = (
"Collection %s does not have a MANIFEST.json. "
"A MANIFEST.json is expected if the collection has been built "
"and installed via ansible-galaxy" % collection.fqcn
)
continue
local_collection = Candidate.from_dir_path(
b_search_path, artifacts_manager,
)
supplemental_signatures = [
get_signature_from_source(source, display)
for source in collection.signature_sources or []
]
local_collection = Candidate(
local_collection.fqcn,
local_collection.ver,
local_collection.src,
local_collection.type,
signatures=frozenset(supplemental_signatures),
)
break
else:
raise AnsibleError(message=default_err)
if local_verify_only:
remote_collection = None
else:
signatures = api_proxy.get_signatures(local_collection)
signatures.extend([
get_signature_from_source(source, display)
for source in collection.signature_sources or []
])
remote_collection = Candidate(
collection.fqcn,
collection.ver if collection.ver != '*'
else local_collection.ver,
None, 'galaxy',
frozenset(signatures),
)
# Download the collection from a galaxy server for comparison
try:
# NOTE: If there are no signatures, trigger the lookup. If found,
# NOTE: it'll cache download URL and token in artifact manager.
# NOTE: If there are no Galaxy server signatures, only user-provided signature URLs,
# NOTE: those alone validate the MANIFEST.json and the remote collection is not downloaded.
# NOTE: The remote MANIFEST.json is only used in verification if there are no signatures.
if artifacts_manager.keyring is None or not signatures:
api_proxy.get_collection_version_metadata(
remote_collection,
)
except AnsibleError as e: # FIXME: does this actually emit any errors?
# FIXME: extract the actual message and adjust this:
expected_error_msg = (
'Failed to find collection {coll.fqcn!s}:{coll.ver!s}'.
format(coll=collection)
)
if e.message == expected_error_msg:
raise AnsibleError(
'Failed to find remote collection '
"'{coll!s}' on any of the galaxy servers".
format(coll=collection)
)
raise
result = verify_local_collection(local_collection, remote_collection, artifacts_manager)
results.append(result)
except AnsibleError as err:
if ignore_errors:
display.warning(
"Failed to verify collection '{coll!s}' but skipping "
'due to --ignore-errors being set. '
'Error: {err!s}'.
format(coll=collection, err=to_text(err)),
)
else:
raise
return results
@contextmanager
def _tempdir():
b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
try:
yield b_temp_path
finally:
shutil.rmtree(b_temp_path)
@contextmanager
def _display_progress(msg=None):
config_display = C.GALAXY_DISPLAY_PROGRESS
display_wheel = sys.stdout.isatty() if config_display is None else config_display
global display
if msg is not None:
display.display(msg)
if not display_wheel:
yield
return
def progress(display_queue, actual_display):
actual_display.debug("Starting display_progress display thread")
t = threading.current_thread()
while True:
for c in "|/-\\":
actual_display.display(c + "\b", newline=False)
time.sleep(0.1)
# Display a message from the main thread
while True:
try:
method, args, kwargs = display_queue.get(block=False, timeout=0.1)
except queue.Empty:
break
else:
func = getattr(actual_display, method)
func(*args, **kwargs)
if getattr(t, "finish", False):
actual_display.debug("Received end signal for display_progress display thread")
return
class DisplayThread(object):
def __init__(self, display_queue):
self.display_queue = display_queue
def __getattr__(self, attr):
def call_display(*args, **kwargs):
self.display_queue.put((attr, args, kwargs))
return call_display
# Temporarily override the global display class with our own, which adds the calls to a queue for the thread to call.
old_display = display
try:
display_queue = queue.Queue()
display = DisplayThread(display_queue)
t = threading.Thread(target=progress, args=(display_queue, old_display))
t.daemon = True
t.start()
try:
yield
finally:
t.finish = True
t.join()
except Exception:
# The exception is re-raised so we can be sure the thread is finished and not using the display anymore
raise
finally:
display = old_display
def _verify_file_hash(b_path, filename, expected_hash, error_queue):
b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
if not os.path.isfile(b_file_path):
actual_hash = None
else:
with open(b_file_path, mode='rb') as file_object:
actual_hash = _consume_file(file_object)
if expected_hash != actual_hash:
error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
def _make_manifest():
return {
'files': [
{
'name': '.',
'ftype': 'dir',
'chksum_type': None,
'chksum_sha256': None,
'format': MANIFEST_FORMAT,
},
],
'format': MANIFEST_FORMAT,
}
def _make_entry(name, ftype, chksum_type='sha256', chksum=None):
return {
'name': name,
'ftype': ftype,
'chksum_type': chksum_type if chksum else None,
f'chksum_{chksum_type}': chksum,
'format': MANIFEST_FORMAT
}
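# NOTE: For example (hypothetical digest), _make_entry('plugins/foo.py', 'file',
# NOTE: chksum=digest) yields {'name': 'plugins/foo.py', 'ftype': 'file',
# NOTE: 'chksum_type': 'sha256', 'chksum_sha256': digest, 'format': MANIFEST_FORMAT};
# NOTE: for entries without a checksum both chksum fields collapse to None.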
def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns,
manifest_control, license_file):
# type: (bytes, str, str, list[str], dict[str, t.Any], t.Optional[str]) -> FilesManifestType
if ignore_patterns and manifest_control is not Sentinel:
raise AnsibleError('"build_ignore" and "manifest" are mutually exclusive')
if manifest_control is not Sentinel:
return _build_files_manifest_distlib(
b_collection_path,
namespace,
name,
manifest_control,
license_file,
)
return _build_files_manifest_walk(b_collection_path, namespace, name, ignore_patterns)
def _build_files_manifest_distlib(b_collection_path, namespace, name, manifest_control,
license_file):
# type: (bytes, str, str, dict[str, t.Any], t.Optional[str]) -> FilesManifestType
if not HAS_DISTLIB:
raise AnsibleError('Use of "manifest" requires the python "distlib" library')
if manifest_control is None:
manifest_control = {}
try:
control = ManifestControl(**manifest_control)
except TypeError as ex:
raise AnsibleError(f'Invalid "manifest" provided: {ex}')
if not is_sequence(control.directives):
raise AnsibleError(f'"manifest.directives" must be a list, got: {control.directives.__class__.__name__}')
if not isinstance(control.omit_default_directives, bool):
raise AnsibleError(
'"manifest.omit_default_directives" is expected to be a boolean, got: '
f'{control.omit_default_directives.__class__.__name__}'
)
if control.omit_default_directives and not control.directives:
raise AnsibleError(
'"manifest.omit_default_directives" was set to True, but no directives were defined '
'in "manifest.directives". This would produce an empty collection artifact.'
)
directives = []
if control.omit_default_directives:
directives.extend(control.directives)
else:
directives.extend([
'include meta/*.yml',
'include *.txt *.md *.rst *.license COPYING LICENSE',
'recursive-include .reuse **',
'recursive-include LICENSES **',
'recursive-include tests **',
'recursive-include docs **.rst **.yml **.yaml **.json **.j2 **.txt **.license',
'recursive-include roles **.yml **.yaml **.json **.j2 **.license',
'recursive-include playbooks **.yml **.yaml **.json **.license',
'recursive-include changelogs **.yml **.yaml **.license',
'recursive-include plugins */**.py */**.license',
])
if license_file:
directives.append(f'include {license_file}')
plugins = set(l.package.split('.')[-1] for d, l in get_all_plugin_loaders())
for plugin in sorted(plugins):
if plugin in ('modules', 'module_utils'):
continue
elif plugin in C.DOCUMENTABLE_PLUGINS:
directives.append(
f'recursive-include plugins/{plugin} **.yml **.yaml'
)
directives.extend([
'recursive-include plugins/modules **.ps1 **.yml **.yaml **.license',
'recursive-include plugins/module_utils **.ps1 **.psm1 **.cs **.license',
])
directives.extend(control.directives)
directives.extend([
f'exclude galaxy.yml galaxy.yaml MANIFEST.json FILES.json {namespace}-{name}-*.tar.gz',
'recursive-exclude tests/output **',
'global-exclude /.* /__pycache__ *.pyc *.pyo *.bak *~ *.swp',
])
display.vvv('Manifest Directives:')
display.vvv(textwrap.indent('\n'.join(directives), ' '))
u_collection_path = to_text(b_collection_path, errors='surrogate_or_strict')
m = Manifest(u_collection_path)
for directive in directives:
try:
m.process_directive(directive)
except DistlibException as e:
raise AnsibleError(f'Invalid manifest directive: {e}')
except Exception as e:
raise AnsibleError(f'Unknown error processing manifest directive: {e}')
manifest = _make_manifest()
for abs_path in m.sorted(wantdirs=True):
rel_path = os.path.relpath(abs_path, u_collection_path)
if os.path.isdir(abs_path):
manifest_entry = _make_entry(rel_path, 'dir')
else:
manifest_entry = _make_entry(
rel_path,
'file',
chksum_type='sha256',
chksum=secure_hash(abs_path, hash_func=sha256)
)
manifest['files'].append(manifest_entry)
return manifest
def _build_files_manifest_walk(b_collection_path, namespace, name, ignore_patterns):
# type: (bytes, str, str, list[str]) -> FilesManifestType
# We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
# patterns can be extended by the build_ignore key in galaxy.yml
b_ignore_patterns = [
b'MANIFEST.json',
b'FILES.json',
b'galaxy.yml',
b'galaxy.yaml',
b'.git',
b'*.pyc',
b'*.retry',
b'tests/output', # Ignore ansible-test result output directory.
to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir.
]
b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
manifest = _make_manifest()
def _discover_relative_base_directory(b_path: bytes, b_top_level_dir: bytes) -> bytes:
if b_path == b_top_level_dir:
return b''
common_prefix = os.path.commonpath((b_top_level_dir, b_path))
b_rel_base_dir = os.path.relpath(b_path, common_prefix)
return b_rel_base_dir.lstrip(os.path.sep.encode())
def _walk(b_path, b_top_level_dir):
b_rel_base_dir = _discover_relative_base_directory(b_path, b_top_level_dir)
for b_item in os.listdir(b_path):
b_abs_path = os.path.join(b_path, b_item)
b_rel_path = os.path.join(b_rel_base_dir, b_item)
rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
if os.path.isdir(b_abs_path):
if b_item in b_ignore_dirs or \
any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
continue
if os.path.islink(b_abs_path):
b_link_target = os.path.realpath(b_abs_path)
if not _is_child_path(b_link_target, b_top_level_dir):
display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
% to_text(b_abs_path))
continue
manifest['files'].append(_make_entry(rel_path, 'dir'))
if not os.path.islink(b_abs_path):
_walk(b_abs_path, b_top_level_dir)
else:
if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
continue
# Handling of file symlinks occur in _build_collection_tar, the manifest for a symlink is the same for
# a normal file.
manifest['files'].append(
_make_entry(
rel_path,
'file',
chksum_type='sha256',
chksum=secure_hash(b_abs_path, hash_func=sha256)
)
)
_walk(b_collection_path, b_collection_path)
return manifest
# FIXME: accept a dict produced from `galaxy.yml` instead of separate args
def _build_manifest(namespace, name, version, authors, readme, tags, description, license_file,
dependencies, repository, documentation, homepage, issues, **kwargs):
manifest = {
'collection_info': {
'namespace': namespace,
'name': name,
'version': version,
'authors': authors,
'readme': readme,
'tags': tags,
'description': description,
'license': kwargs['license'],
'license_file': license_file or None, # Handle galaxy.yml having an empty string (None)
'dependencies': dependencies,
'repository': repository,
'documentation': documentation,
'homepage': homepage,
'issues': issues,
},
'file_manifest_file': {
'name': 'FILES.json',
'ftype': 'file',
'chksum_type': 'sha256',
'chksum_sha256': None, # Filled out in _build_collection_tar
'format': MANIFEST_FORMAT
},
'format': MANIFEST_FORMAT,
}
return manifest
def _build_collection_tar(
b_collection_path, # type: bytes
b_tar_path, # type: bytes
collection_manifest, # type: CollectionManifestType
file_manifest, # type: FilesManifestType
): # type: (...) -> str
"""Build a tar.gz collection artifact from the manifest data."""
files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
with _tempdir() as b_temp_path:
b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))
with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
# Add the MANIFEST.json and FILES.json file to the archive
for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]:
b_io = BytesIO(b)
tar_info = tarfile.TarInfo(name)
tar_info.size = len(b)
tar_info.mtime = int(time.time())
tar_info.mode = S_IRWU_RG_RO
tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
for file_info in file_manifest['files']: # type: ignore[union-attr]
if file_info['name'] == '.':
continue
# arcname expects a native string, cannot be bytes
filename = to_native(file_info['name'], errors='surrogate_or_strict')
b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))
def reset_stat(tarinfo):
if tarinfo.type != tarfile.SYMTYPE:
existing_is_exec = tarinfo.mode & stat.S_IXUSR
tarinfo.mode = S_IRWXU_RXG_RXO if existing_is_exec or tarinfo.isdir() else S_IRWU_RG_RO
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = ''
return tarinfo
if os.path.islink(b_src_path):
b_link_target = os.path.realpath(b_src_path)
if not os.path.exists(b_link_target):
raise AnsibleError(f"Failed to find the target path '{to_native(b_link_target)}' for the symlink '{to_native(b_src_path)}'.")
if _is_child_path(b_link_target, b_collection_path):
b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))
tar_info = tarfile.TarInfo(filename)
tar_info.type = tarfile.SYMTYPE
tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')
tar_info = reset_stat(tar_info)
tar_file.addfile(tarinfo=tar_info)
continue
# Dealing with a normal file, just add it by name.
tar_file.add(
to_native(os.path.realpath(b_src_path)),
arcname=filename,
recursive=False,
filter=reset_stat,
)
shutil.copy(to_native(b_tar_filepath), to_native(b_tar_path))
collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
collection_manifest['collection_info']['name'])
tar_path = to_text(b_tar_path)
display.display(u'Created collection for %s at %s' % (collection_name, tar_path))
return tar_path
def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest):
"""Build a collection directory from the manifest data.
This should follow the same pattern as _build_collection_tar.
"""
os.makedirs(b_collection_output, mode=S_IRWXU_RXG_RXO)
files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
# Write contents to the files
for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]:
b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict'))
with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
shutil.copyfileobj(b_io, file_obj)
os.chmod(b_path, S_IRWU_RG_RO)
base_directories = []
for file_info in sorted(file_manifest['files'], key=lambda x: x['name']):
if file_info['name'] == '.':
continue
src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict'))
dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))
existing_is_exec = os.stat(src_file, follow_symlinks=False).st_mode & stat.S_IXUSR
mode = S_IRWXU_RXG_RXO if existing_is_exec else S_IRWU_RG_RO
# ensure symlinks to dirs are not translated to empty dirs
if os.path.isdir(src_file) and not os.path.islink(src_file):
mode = S_IRWXU_RXG_RXO
base_directories.append(src_file)
os.mkdir(dest_file, mode)
else:
# do not follow symlinks to ensure the original link is used
shutil.copyfile(src_file, dest_file, follow_symlinks=False)
# avoid setting specific permissions on symlinks since chmod cannot
# avoid following symlinks here and will throw an exception if the
# symlink target does not exist
if not os.path.islink(dest_file):
os.chmod(dest_file, mode)
collection_output = to_text(b_collection_output)
return collection_output
def _normalize_collection_path(path):
str_path = path.as_posix() if isinstance(path, pathlib.Path) else path
return pathlib.Path(
# This is annoying, but GalaxyCLI._resolve_path did it
os.path.expandvars(str_path)
).expanduser().absolute()
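# NOTE: e.g. (illustrative value) '$HOME/collections' is expanded via
# NOTE: os.path.expandvars() and Path.expanduser() before being made absolute,
# NOTE: so equivalent spellings of the same location compare equal.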
def find_existing_collections(path_filter, artifacts_manager, namespace_filter=None, collection_filter=None, dedupe=True):
"""Locate all collections under a given path.
:param path_filter: Only return collections from under these collection dirs layout search paths.
:param artifacts_manager: Artifacts manager.
"""
if files is None:
raise AnsibleError('importlib_resources is not installed and is required')
if path_filter and not is_sequence(path_filter):
path_filter = [path_filter]
if namespace_filter and not is_sequence(namespace_filter):
namespace_filter = [namespace_filter]
if collection_filter and not is_sequence(collection_filter):
collection_filter = [collection_filter]
paths = set()
for path in files('ansible_collections').glob('*/*/'):
path = _normalize_collection_path(path)
if not path.is_dir():
continue
if path_filter:
for pf in path_filter:
try:
path.relative_to(_normalize_collection_path(pf))
except ValueError:
continue
break
else:
continue
paths.add(path)
seen = set()
for path in paths:
namespace = path.parent.name
name = path.name
if namespace_filter and namespace not in namespace_filter:
continue
if collection_filter and name not in collection_filter:
continue
if dedupe:
try:
collection_path = files(f'ansible_collections.{namespace}.{name}')
except ImportError:
continue
if collection_path in seen:
continue
seen.add(collection_path)
else:
collection_path = path
b_collection_path = to_bytes(collection_path.as_posix())
try:
req = Candidate.from_dir_path_as_unknown(b_collection_path, artifacts_manager)
except ValueError as val_err:
display.warning(f'{val_err}')
continue
display.vvv(
u"Found installed collection {coll!s} at '{path!s}'".
format(coll=to_text(req), path=to_text(req.src))
)
yield req
def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?
# type: (Candidate, str, ConcreteArtifactsManager) -> None
"""Install a collection under a given path.
:param collection: Collection to be installed.
:param path: Collection dirs layout path.
:param artifacts_manager: Artifacts manager.
"""
b_artifact_path = artifacts_manager.get_artifact_path_from_unknown(collection)
collection_path = os.path.join(path, collection.namespace, collection.name)
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
display.display(
u"Installing '{coll!s}' to '{path!s}'".
format(coll=to_text(collection), path=collection_path),
)
if os.path.exists(b_collection_path):
shutil.rmtree(b_collection_path)
if collection.is_dir:
install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)
else:
install_artifact(
b_artifact_path,
b_collection_path,
artifacts_manager._b_working_directory,
collection.signatures,
artifacts_manager.keyring,
artifacts_manager.required_successful_signature_count,
artifacts_manager.ignore_signature_errors,
)
remove_source_metadata(collection, b_collection_path)
if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):
write_source_metadata(
collection,
b_collection_path,
artifacts_manager
)
display.display(
'{coll!s} was installed successfully'.
format(coll=to_text(collection)),
)
def write_source_metadata(collection, b_collection_path, artifacts_manager):
# type: (Candidate, bytes, ConcreteArtifactsManager) -> None
source_data = artifacts_manager.get_galaxy_artifact_source_info(collection)
b_yaml_source_data = to_bytes(yaml_dump(source_data), errors='surrogate_or_strict')
b_info_dest = collection.construct_galaxy_info_path(b_collection_path)
b_info_dir = os.path.split(b_info_dest)[0]
if os.path.exists(b_info_dir):
shutil.rmtree(b_info_dir)
try:
os.mkdir(b_info_dir, mode=S_IRWXU_RXG_RXO)
with open(b_info_dest, mode='w+b') as fd:
fd.write(b_yaml_source_data)
os.chmod(b_info_dest, S_IRWU_RG_RO)
except Exception:
# Ensure we don't leave the dir behind in case of a failure.
if os.path.isdir(b_info_dir):
shutil.rmtree(b_info_dir)
raise
def remove_source_metadata(collection, b_collection_path):
pattern = f"{collection.namespace}.{collection.name}-*.info"
info_path = os.path.join(
b_collection_path,
b'../../',
to_bytes(pattern, errors='surrogate_or_strict')
)
if (outdated_info := glob.glob(info_path)):
display.vvvv(f"Removing {pattern} metadata from previous installations")
for info_dir in outdated_info:
try:
shutil.rmtree(info_dir)
except Exception:
pass
def verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors):
# type: (str, list[str], str, str, list[str]) -> None
coll_path_parts = to_text(manifest_file, errors='surrogate_or_strict').split(os.path.sep)
collection_name = '%s.%s' % (coll_path_parts[-3], coll_path_parts[-2]) # get 'ns' and 'coll' from /path/to/ns/coll/MANIFEST.json
if not verify_file_signatures(collection_name, manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors):
raise AnsibleError(f"Not installing {collection_name} because GnuPG signature verification failed.")
display.vvvv(f"GnuPG signature verification succeeded for {collection_name}")
def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path, signatures, keyring, required_signature_count, ignore_signature_errors):
"""Install a collection from tarball under a given path.
:param b_coll_targz_path: Collection tarball to be installed.
:param b_collection_path: Collection dirs layout path.
:param b_temp_path: Temporary dir path.
:param signatures: frozenset of signatures to verify the MANIFEST.json
:param keyring: The keyring used during GPG verification
:param required_signature_count: The number of signatures that must successfully verify the collection
:param ignore_signature_errors: GPG errors to ignore during signature verification
"""
try:
with tarfile.open(b_coll_targz_path, mode='r') as collection_tar:
# Verify the signature on the MANIFEST.json before extracting anything else
_extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path)
if keyring is not None:
manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME)
verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors)
files_member_obj = collection_tar.getmember('FILES.json')
with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))
_extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)
for file_info in files['files']:
file_name = file_info['name']
if file_name == '.':
continue
if file_info['ftype'] == 'file':
_extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
expected_hash=file_info['chksum_sha256'])
else:
_extract_tar_dir(collection_tar, file_name, b_collection_path)
except Exception:
# Ensure we don't leave the dir behind in case of a failure.
shutil.rmtree(b_collection_path)
b_namespace_path = os.path.dirname(b_collection_path)
if not os.listdir(b_namespace_path):
os.rmdir(b_namespace_path)
raise
def install_src(collection, b_collection_path, b_collection_output_path, artifacts_manager):
r"""Install the collection from source control into given dir.
Generates the Ansible collection artifact data from a galaxy.yml and
installs the artifact to a directory.
This should follow the same pattern as build_collection, but instead
of creating an artifact, install it.
:param collection: Collection to be installed.
:param b_collection_path: Collection dirs layout path.
:param b_collection_output_path: The installation directory for the \
collection artifact.
:param artifacts_manager: Artifacts manager.
:raises AnsibleError: If no collection metadata found.
"""
collection_meta = artifacts_manager.get_direct_collection_meta(collection)
if 'build_ignore' not in collection_meta: # installed collection, not src
# FIXME: optimize this? use a different process? copy instead of build?
collection_meta['build_ignore'] = []
collection_meta['manifest'] = Sentinel
collection_manifest = _build_manifest(**collection_meta)
file_manifest = _build_files_manifest(
b_collection_path,
collection_meta['namespace'], collection_meta['name'],
collection_meta['build_ignore'],
collection_meta['manifest'],
collection_meta['license_file'],
)
collection_output_path = _build_collection_dir(
b_collection_path, b_collection_output_path,
collection_manifest, file_manifest,
)
display.display(
'Created collection for {coll!s} at {path!s}'.
format(coll=collection, path=collection_output_path)
)
def _extract_tar_dir(tar, dirname, b_dest):
""" Extracts a directory from a collection tar. """
dirname = to_native(dirname, errors='surrogate_or_strict')
try:
tar_member = tar.getmember(dirname)
except KeyError:
raise AnsibleError("Unable to extract '%s' from collection" % dirname)
b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))
b_parent_path = os.path.dirname(b_dir_path)
try:
os.makedirs(b_parent_path, mode=S_IRWXU_RXG_RXO)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if tar_member.type == tarfile.SYMTYPE:
b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path):
raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
"collection '%s'" % (to_native(dirname), b_link_path))
os.symlink(b_link_path, b_dir_path)
else:
if not os.path.isdir(b_dir_path):
os.mkdir(b_dir_path, S_IRWXU_RXG_RXO)
def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
""" Extracts a file from a collection tar. """
with _get_tar_file_member(tar, filename) as (tar_member, tar_obj):
if tar_member.type == tarfile.SYMTYPE:
actual_hash = _consume_file(tar_obj)
else:
with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
actual_hash = _consume_file(tar_obj, tmpfile_obj)
if expected_hash and actual_hash != expected_hash:
raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
% (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))
b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
b_parent_dir = os.path.dirname(b_dest_filepath)
if not _is_child_path(b_parent_dir, b_dest):
raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
% to_native(filename, errors='surrogate_or_strict'))
if not os.path.exists(b_parent_dir):
# Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
# makes sure we create the parent directory even if it wasn't set in the metadata.
os.makedirs(b_parent_dir, mode=S_IRWXU_RXG_RXO)
if tar_member.type == tarfile.SYMTYPE:
b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath):
raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
"collection '%s'" % (to_native(filename), b_link_path))
os.symlink(b_link_path, b_dest_filepath)
else:
shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)
# Default to rw-r--r-- and only add execute if the tar file has execute.
tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
new_mode = S_IRWU_RG_RO
if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
new_mode |= S_IXANY
os.chmod(b_dest_filepath, new_mode)
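# Worked example of the mode handling above (a sketch, not original code):
# S_IRWU_RG_RO is rw-r--r-- (0o644) and S_IXANY adds execute for all (0o111),
# so a tar member stored as 0o755 is restored as 0o755, while one stored as
# 0o644 stays 0o644:
#   stat.S_IMODE(0o755) & stat.S_IXUSR  # truthy -> new_mode = 0o644 | 0o111 = 0o755
#   stat.S_IMODE(0o644) & stat.S_IXUSR  # 0      -> new_mode = 0o644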
def _get_tar_file_member(tar, filename):
n_filename = to_native(filename, errors='surrogate_or_strict')
try:
member = tar.getmember(n_filename)
except KeyError:
raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (
to_native(tar.name),
n_filename))
return _tarfile_extract(tar, member)
def _get_json_from_tar_file(b_path, filename):
file_contents = ''
with tarfile.open(b_path, mode='r') as collection_tar:
with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
bufsize = 65536
data = tar_obj.read(bufsize)
while data:
file_contents += to_text(data)
data = tar_obj.read(bufsize)
return json.loads(file_contents)
def _get_tar_file_hash(b_path, filename):
with tarfile.open(b_path, mode='r') as collection_tar:
with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
return _consume_file(tar_obj)
def _get_file_hash(b_path, filename): # type: (bytes, str) -> str
filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))
with open(filepath, 'rb') as fp:
return _consume_file(fp)
def _is_child_path(path, parent_path, link_name=None):
""" Checks that path is a path within the parent_path specified. """
b_path = to_bytes(path, errors='surrogate_or_strict')
if link_name and not os.path.isabs(b_path):
# If link_name is specified, path is the source of the link and we need to resolve the absolute path.
b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
b_path = os.path.abspath(os.path.join(b_link_dir, b_path))
b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))
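# Illustrative examples (assumed paths, not part of the original source):
#   _is_child_path(b'/colls/ns/name/plugins', b'/colls/ns/name')  # True
#   _is_child_path(b'/colls/ns/name', b'/colls/ns/name')          # True (path == parent)
#   _is_child_path(b'/etc/passwd', b'/colls/ns/name')             # False
#   # a relative symlink target is resolved against the link's directory first:
#   _is_child_path(b'../docs', b'/colls/ns/name', link_name=b'/colls/ns/name/sub/lnk')  # True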
def _resolve_depenency_map(
requested_requirements, # type: t.Iterable[Requirement]
galaxy_apis, # type: t.Iterable[GalaxyAPI]
concrete_artifacts_manager, # type: ConcreteArtifactsManager
preferred_candidates, # type: t.Iterable[Candidate] | None
no_deps, # type: bool
allow_pre_release, # type: bool
upgrade, # type: bool
include_signatures, # type: bool
offline, # type: bool
): # type: (...) -> dict[str, Candidate]
"""Return the resolved dependency map."""
if not HAS_RESOLVELIB:
raise AnsibleError("Failed to import resolvelib, check that a supported version is installed")
if not HAS_PACKAGING:
raise AnsibleError("Failed to import packaging, check that a supported version is installed")
req = None
try:
dist = distribution('ansible-core')
except Exception:
pass
else:
req = next((rr for r in (dist.requires or []) if (rr := PkgReq(r)).name == 'resolvelib'), None)
finally:
if req is None:
# TODO: replace the hardcoded versions with a warning if the dist info is missing
# display.warning("Unable to find 'ansible-core' distribution requirements to verify the resolvelib version is supported.")
if not RESOLVELIB_LOWERBOUND <= RESOLVELIB_VERSION < RESOLVELIB_UPPERBOUND:
raise AnsibleError(
f"ansible-galaxy requires resolvelib<{RESOLVELIB_UPPERBOUND.vstring},>={RESOLVELIB_LOWERBOUND.vstring}"
)
elif not req.specifier.contains(RESOLVELIB_VERSION.vstring):
raise AnsibleError(f"ansible-galaxy requires {req.name}{req.specifier}")
pre_release_hint = '' if allow_pre_release else (
'Hint: Pre-releases hosted on Galaxy or Automation Hub are not '
'installed by default unless a specific version is requested. '
'To enable pre-releases globally, use --pre.'
)
collection_dep_resolver = build_collection_dependency_resolver(
galaxy_apis=galaxy_apis,
concrete_artifacts_manager=concrete_artifacts_manager,
preferred_candidates=preferred_candidates,
with_deps=not no_deps,
with_pre_releases=allow_pre_release,
upgrade=upgrade,
include_signatures=include_signatures,
offline=offline,
)
try:
return collection_dep_resolver.resolve(
requested_requirements,
max_rounds=2000000, # NOTE: same constant pip uses
).mapping
except CollectionDependencyResolutionImpossible as dep_exc:
conflict_causes = (
'* {req.fqcn!s}:{req.ver!s} ({dep_origin!s})'.format(
req=req_inf.requirement,
dep_origin='direct request'
if req_inf.parent is None
else 'dependency of {parent!s}'.
format(parent=req_inf.parent),
)
for req_inf in dep_exc.causes
)
error_msg_lines = list(chain(
(
'Failed to resolve the requested '
'dependencies map. Could not satisfy the following '
'requirements:',
),
conflict_causes,
))
error_msg_lines.append(pre_release_hint)
raise AnsibleError('\n'.join(error_msg_lines)) from dep_exc
except CollectionDependencyInconsistentCandidate as dep_exc:
parents = [
str(p) for p in dep_exc.criterion.iter_parent()
if p is not None
]
error_msg_lines = [
(
'Failed to resolve the requested dependencies map. '
'Got the candidate {req.fqcn!s}:{req.ver!s} ({dep_origin!s}) '
'which didn\'t satisfy all of the following requirements:'.
format(
req=dep_exc.candidate,
dep_origin='direct request'
if not parents else 'dependency of {parent!s}'.
format(parent=', '.join(parents))
)
)
]
for req in dep_exc.criterion.iter_requirement():
error_msg_lines.append(
f'* {req.fqcn!s}:{req.ver!s}'
)
error_msg_lines.append(pre_release_hint)
raise AnsibleError('\n'.join(error_msg_lines)) from dep_exc
except ValueError as exc:
raise AnsibleError(to_native(exc)) from exc
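# Illustrative usage sketch (hypothetical requirement/API/manager objects, not
# part of the original source): resolve the requested collections and inspect
# the pinned candidates:
#   mapping = _resolve_depenency_map(
#       requested_requirements=requirements,  # Requirement instances parsed upstream
#       galaxy_apis=apis,
#       concrete_artifacts_manager=cam,
#       preferred_candidates=None,
#       no_deps=False,
#       allow_pre_release=False,
#       upgrade=False,
#       include_signatures=True,
#       offline=False,
#   )
#   for fqcn, candidate in mapping.items():
#       display.vvv(f'{fqcn} pinned to {candidate.ver}')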
| 79,024 | Python | .py | 1,599 | 38.554722 | 158 | 0.624501 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,073 | galaxy_api_proxy.py | ansible_ansible/lib/ansible/galaxy/collection/galaxy_api_proxy.py |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""A facade for interfacing with multiple Galaxy instances."""
from __future__ import annotations
import typing as t
if t.TYPE_CHECKING:
from ansible.galaxy.api import CollectionVersionMetadata
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate, Requirement,
)
from ansible.galaxy.api import GalaxyAPI, GalaxyError
from ansible.module_utils.common.text.converters import to_text
from ansible.utils.display import Display
display = Display()
class MultiGalaxyAPIProxy:
"""A proxy that abstracts talking to multiple Galaxy instances."""
def __init__(self, apis: t.Iterable[GalaxyAPI], concrete_artifacts_manager: ConcreteArtifactsManager, offline: bool = False) -> None:
"""Initialize the target APIs list."""
self._apis = apis
self._concrete_art_mgr = concrete_artifacts_manager
self._offline = offline # Prevent all GalaxyAPI calls
@property
def is_offline_mode_requested(self):
return self._offline
def _assert_that_offline_mode_is_not_requested(self) -> None:
if self.is_offline_mode_requested:
raise NotImplementedError("The calling code is not supposed to be invoked in 'offline' mode.")
def _get_collection_versions(self, requirement: Requirement) -> t.Iterator[tuple[GalaxyAPI, str]]:
"""Helper for get_collection_versions.
Yield api, version pairs for all APIs,
and reraise the last error if no valid API was found.
"""
if self._offline:
return
found_api = False
last_error: Exception | None = None
api_lookup_order = (
(requirement.src, )
if isinstance(requirement.src, GalaxyAPI)
else self._apis
)
for api in api_lookup_order:
try:
versions = api.get_collection_versions(requirement.namespace, requirement.name)
except GalaxyError as api_err:
last_error = api_err
except Exception as unknown_err:
display.warning(
"Skipping Galaxy server {server!s}. "
"Got an unexpected error when getting "
"available versions of collection {fqcn!s}: {err!s}".
format(
server=api.api_server,
fqcn=requirement.fqcn,
err=to_text(unknown_err),
)
)
last_error = unknown_err
else:
found_api = True
for version in versions:
yield api, version
if not found_api and last_error is not None:
raise last_error
def get_collection_versions(self, requirement: Requirement) -> t.Iterable[tuple[str, GalaxyAPI]]:
"""Get a set of unique versions for FQCN on Galaxy servers."""
if requirement.is_concrete_artifact:
return {
(
self._concrete_art_mgr.
get_direct_collection_version(requirement),
requirement.src,
),
}
api_lookup_order = (
(requirement.src, )
if isinstance(requirement.src, GalaxyAPI)
else self._apis
)
return set(
(version, api)
for api, version in self._get_collection_versions(
requirement,
)
)
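# Illustrative sketch (hypothetical requirement object, not part of the
# original source): a concrete artifact short-circuits to its single known
# version, while a Galaxy-backed requirement fans out to every configured
# server:
#   proxy = MultiGalaxyAPIProxy([api_a, api_b], cam)
#   proxy.get_collection_versions(req)
#   # -> {('1.0.0', api_a), ('1.1.0', api_a), ('1.0.0', api_b), ...}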
def get_collection_version_metadata(self, collection_candidate: Candidate) -> CollectionVersionMetadata:
"""Retrieve collection metadata of a given candidate."""
self._assert_that_offline_mode_is_not_requested()
api_lookup_order = (
(collection_candidate.src, )
if isinstance(collection_candidate.src, GalaxyAPI)
else self._apis
)
last_err: t.Optional[Exception]
for api in api_lookup_order:
try:
version_metadata = api.get_collection_version_metadata(
collection_candidate.namespace,
collection_candidate.name,
collection_candidate.ver,
)
except GalaxyError as api_err:
last_err = api_err
except Exception as unknown_err:
# `verify` doesn't use `get_collection_versions` since the version is already known.
# Do the same as `install` and `download` by trying all APIs before failing.
# Warn for debugging purposes, since the Galaxy server may be unexpectedly down.
last_err = unknown_err
display.warning(
"Skipping Galaxy server {server!s}. "
"Got an unexpected error when getting "
"available versions of collection {fqcn!s}: {err!s}".
format(
server=api.api_server,
fqcn=collection_candidate.fqcn,
err=to_text(unknown_err),
)
)
else:
self._concrete_art_mgr.save_collection_source(
collection_candidate,
version_metadata.download_url,
version_metadata.artifact_sha256,
api.token,
version_metadata.signatures_url,
version_metadata.signatures,
)
return version_metadata
raise last_err
def get_collection_dependencies(self, collection_candidate: Candidate) -> dict[str, str]:
# FIXME: return Requirement instances instead?
"""Retrieve collection dependencies of a given candidate."""
if collection_candidate.is_concrete_artifact:
return (
self.
_concrete_art_mgr.
get_direct_collection_dependencies
)(collection_candidate)
return (
self.
get_collection_version_metadata(collection_candidate).
dependencies
)
def get_signatures(self, collection_candidate: Candidate) -> list[str]:
self._assert_that_offline_mode_is_not_requested()
namespace = collection_candidate.namespace
name = collection_candidate.name
version = collection_candidate.ver
last_err: Exception | None = None
api_lookup_order = (
(collection_candidate.src, )
if isinstance(collection_candidate.src, GalaxyAPI)
else self._apis
)
for api in api_lookup_order:
try:
return api.get_collection_signatures(namespace, name, version)
except GalaxyError as api_err:
last_err = api_err
except Exception as unknown_err:
# Warn for debugging purposes, since the Galaxy server may be unexpectedly down.
last_err = unknown_err
display.warning(
"Skipping Galaxy server {server!s}. "
"Got an unexpected error when getting "
"available versions of collection {fqcn!s}: {err!s}".
format(
server=api.api_server,
fqcn=collection_candidate.fqcn,
err=to_text(unknown_err),
)
)
if last_err:
raise last_err
return []
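# Sketch of the offline guard (assumed setup, not original code): any method
# that must reach the network raises when offline mode was requested:
#   proxy = MultiGalaxyAPIProxy(apis, cam, offline=True)
#   proxy.get_signatures(candidate)  # raises NotImplementedError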
| 7,805 | Python | .py | 179 | 30.413408 | 137 | 0.574381 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,074 | play.py | ansible_ansible/lib/ansible/playbook/play.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleParserError, AnsibleAssertionError, AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.six import binary_type, string_types, text_type
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.playbook.taggable import Taggable
from ansible.vars.manager import preprocess_vars
from ansible.utils.display import Display
display = Display()
__all__ = ['Play']
class Play(Base, Taggable, CollectionSearch):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
hosts = NonInheritableFieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True, priority=-2)
# Facts
gather_facts = NonInheritableFieldAttribute(isa='bool', default=None, always_post_validate=True)
gather_subset = NonInheritableFieldAttribute(isa='list', default=None, listof=string_types, always_post_validate=True)
gather_timeout = NonInheritableFieldAttribute(isa='int', default=None, always_post_validate=True)
fact_path = NonInheritableFieldAttribute(isa='string', default=None)
# Variable Attributes
vars_files = NonInheritableFieldAttribute(isa='list', default=list, priority=99)
vars_prompt = NonInheritableFieldAttribute(isa='list', default=list, always_post_validate=False)
# Role Attributes
roles = NonInheritableFieldAttribute(isa='list', default=list, priority=90)
# Block (Task) Lists Attributes
handlers = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
pre_tasks = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
post_tasks = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
tasks = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
# Flag/Setting Attributes
force_handlers = NonInheritableFieldAttribute(isa='bool', default=context.cliargs_deferred_get('force_handlers'), always_post_validate=True)
max_fail_percentage = NonInheritableFieldAttribute(isa='percent', always_post_validate=True)
serial = NonInheritableFieldAttribute(isa='list', default=list, always_post_validate=True)
strategy = NonInheritableFieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
order = NonInheritableFieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.role_cache = {}
self.only_tags = set(context.CLIARGS.get('tags', [])) or frozenset(('all',))
self.skip_tags = set(context.CLIARGS.get('skip_tags', []))
self._action_groups = {}
self._group_actions = {}
def __repr__(self):
return self.get_name()
def _get_cached_role(self, role):
role_path = role.get_role_path()
role_cache = self.role_cache[role_path]
try:
idx = role_cache.index(role)
return role_cache[idx]
except ValueError:
raise AnsibleError(f'Cannot locate {role.get_name()} in role cache')
def _validate_hosts(self, attribute, name, value):
# Only validate 'hosts' if a value was passed in to original data set.
if 'hosts' in self._ds:
if not value:
raise AnsibleParserError("Hosts list cannot be empty. Please check your playbook")
if is_sequence(value):
# Make sure each item in the sequence is a valid string
for entry in value:
if entry is None:
raise AnsibleParserError("Hosts list cannot contain values of 'None'. Please check your playbook")
elif not isinstance(entry, (binary_type, text_type)):
raise AnsibleParserError("Hosts list contains an invalid host value: '{host!s}'".format(host=entry))
elif not isinstance(value, (binary_type, text_type)):
raise AnsibleParserError("Hosts list must be a sequence or string. Please check your playbook.")
def get_name(self):
""" return the name of the Play """
if self.name:
return self.name
if is_sequence(self.hosts):
self.name = ','.join(self.hosts)
else:
self.name = self.hosts or ''
return self.name
@staticmethod
def load(data, variable_manager=None, loader=None, vars=None):
p = Play()
if vars:
p.vars = vars.copy()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
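# Minimal sketch (not part of the original source; real callers also pass a
# variable_manager and loader):
#   p = Play.load({'name': 'demo', 'hosts': ['web', 'db'], 'gather_facts': False})
#   p.get_name()  # -> 'demo'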
def preprocess_data(self, ds):
"""
Adjusts the play datastructure to clean up old/legacy items
"""
if not isinstance(ds, dict):
raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for this play. "
"The use of 'user' is deprecated, and should be removed", obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
def _load_tasks(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
def _load_pre_tasks(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
def _load_post_tasks(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
def _load_handlers(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
"""
try:
return self._extend_value(
self.handlers,
load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
prepend=True
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
def _load_roles(self, attr, ds):
"""
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
"""
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
loader=self._loader, collection_search_list=self.collections)
except AssertionError as e:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
self.roles[:0] = roles
return self.roles
def _load_vars_prompt(self, attr, ds):
new_ds = preprocess_vars(ds)
vars_prompts = []
if new_ds is not None:
for prompt_data in new_ds:
if 'name' not in prompt_data:
raise AnsibleParserError("Invalid vars_prompt data structure, missing 'name' key", obj=ds)
for key in prompt_data:
if key not in ('name', 'prompt', 'default', 'private', 'confirm', 'encrypt', 'salt_size', 'salt', 'unsafe'):
raise AnsibleParserError("Invalid vars_prompt data structure, found unsupported key '%s'" % key, obj=ds)
vars_prompts.append(prompt_data)
return vars_prompts
def _compile_roles(self):
"""
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
D2 are merged first, followed by D1, and lastly by the tasks from
the parent role R last. This is done for all roles in the Play.
"""
block_list = []
if len(self.roles) > 0:
for r in self.roles:
# Don't insert tasks from ``import/include_role``, preventing
# duplicate execution at the wrong time
if r.from_include:
continue
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
"""
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
"""
block_list = []
if len(self.roles) > 0:
for r in self.roles:
if r.from_include:
continue
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
"""
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
"""
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block(play=self)
t = Task()
t.action = 'meta'
t.resolved_action = 'ansible.builtin.meta'
t.args['_raw_params'] = 'flush_handlers'
t.implicit = True
t.set_loader(self._loader)
if self.tags:
# Avoid calling flush_handlers in case the whole play is skipped on tags;
# this can be a performance improvement, since flush_handlers runs per host
# and can be expensive on large inventories even when no hosts are notified.
# Block.filter_tagged_tasks ignores evaluating tags on implicit meta
# tasks, so we need to explicitly call Task.evaluate_tags here.
t.tags = self.tags
if t.evaluate_tags(self.only_tags, self.skip_tags, all_vars=self.vars):
flush_block.block = [t]
else:
flush_block.block = [t]
block_list = []
if self.force_handlers:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.implicit = True
noop_task.set_loader(self._loader)
b = Block(play=self)
b.block = self.pre_tasks or [noop_task]
b.always = [flush_block]
block_list.append(b)
tasks = self._compile_roles() + self.tasks
b = Block(play=self)
b.block = tasks or [noop_task]
b.always = [flush_block]
block_list.append(b)
b = Block(play=self)
b.block = self.post_tasks or [noop_task]
b.always = [flush_block]
block_list.append(b)
return block_list
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
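# Shape of the compiled task list (informal sketch, not original code):
# without force_handlers the play compiles to
#   [*pre_tasks, flush_block, *role_tasks, *tasks, flush_block, *post_tasks, flush_block]
# with force_handlers, each phase instead becomes a Block whose `always` list
# carries the flush_handlers meta task, so handlers still run after failures.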
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
if self.vars_files is None:
return []
elif not isinstance(self.vars_files, list):
return [self.vars_files]
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
data['included_path'] = self._included_path
data['action_groups'] = self._action_groups
data['group_actions'] = self._group_actions
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
self._included_path = data.get('included_path', None)
self._action_groups = data.get('action_groups', {})
self._group_actions = data.get('group_actions', {})
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.role_cache = self.role_cache.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
new_me._action_groups = self._action_groups
new_me._group_actions = self._group_actions
return new_me
| 16,527 | Python | .py | 341 | 38.803519 | 144 | 0.630975 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,075 | play_context.py | ansible_ansible/lib/ansible/playbook/play_context.py |
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible import constants as C
from ansible import context
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayContext']
TASK_ATTRIBUTE_OVERRIDES = (
'become',
'become_user',
'become_pass',
'become_method',
'become_flags',
'connection',
'docker_extra_args', # TODO: remove
'delegate_to',
'no_log',
'remote_user',
)
RESET_VARS = (
'ansible_connection',
'ansible_user',
'ansible_host',
'ansible_port',
# TODO: ???
'ansible_docker_extra_args',
'ansible_ssh_host',
'ansible_ssh_pass',
'ansible_ssh_port',
'ansible_ssh_user',
'ansible_ssh_private_key_file',
'ansible_ssh_pipelining',
'ansible_ssh_executable',
)
class PlayContext(Base):
"""
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
"""
# base
module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
shell = FieldAttribute(isa='string')
executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
remote_addr = FieldAttribute(isa='string')
password = FieldAttribute(isa='string')
timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
connection_user = FieldAttribute(isa='string')
private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_PIPELINING)
# networking modules
network_os = FieldAttribute(isa='string')
# docker FIXME: remove these
docker_extra_args = FieldAttribute(isa='string')
# ???
connection_lockfd = FieldAttribute(isa='int')
# privilege escalation fields
become = FieldAttribute(isa='bool')
become_method = FieldAttribute(isa='string')
become_user = FieldAttribute(isa='string')
become_pass = FieldAttribute(isa='string')
become_exe = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_EXE)
become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
prompt = FieldAttribute(isa='string')
# general flags
only_tags = FieldAttribute(isa='set', default=set)
skip_tags = FieldAttribute(isa='set', default=set)
start_at_task = FieldAttribute(isa='string')
step = FieldAttribute(isa='bool', default=False)
# "PlayContext.force_handlers should not be used, the calling code should be using play itself instead"
force_handlers = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, passwords=None, connection_lockfd=None):
# Note: play is really not optional. The only time it could be omitted is when we create
# a PlayContext just so we can invoke its deserialize method to load it from a serialized
# data source.
super(PlayContext, self).__init__()
if passwords is None:
passwords = {}
self.password = passwords.get('conn_pass', '')
self.become_pass = passwords.get('become_pass', '')
self._become_plugin = None
self.prompt = ''
self.success_key = ''
# a file descriptor to be used during locking operations
self.connection_lockfd = connection_lockfd
# set options before play to allow play to override them
if context.CLIARGS:
self.set_attributes_from_cli()
else:
self._internal_verbosity = 0
if play:
self.set_attributes_from_play(play)
def set_attributes_from_plugin(self, plugin):
# generic derived from connection plugin, temporary for backwards compat, in the end we should not set play_context properties
# get options for plugins
options = C.config.get_configuration_definitions(plugin.plugin_type, plugin._load_name)
for option in options:
if option:
flag = options[option].get('name')
if flag:
setattr(self, flag, plugin.get_option(flag))
def set_attributes_from_play(self, play):
self.force_handlers = play.force_handlers
def set_attributes_from_cli(self):
"""
Configures this connection information instance with data from
options specified by the user on the command line. These have a
lower precedence than those set on the play or host.
"""
if context.CLIARGS.get('timeout', False):
self.timeout = int(context.CLIARGS['timeout'])
# From the command line. These should probably be used directly by plugins instead
# For now, they are likely to be moved to FieldAttribute defaults
self.private_key_file = context.CLIARGS.get('private_key_file') # Else default
self._internal_verbosity = context.CLIARGS.get('verbosity') # Else default
# Not every cli that uses PlayContext has these command line args so have a default
self.start_at_task = context.CLIARGS.get('start_at_task', None) # Else default
def set_task_and_variable_override(self, task, variables, templar):
"""
Sets attributes from the task if they are set, which will override
those from the play.
:arg task: the task object with the parameters that were set on it
:arg variables: variables from inventory
:arg templar: templar instance if templating variables is needed
"""
new_info = self.copy()
# loop through a subset of attributes on the task object and set
# connection fields based on their values
for attr in TASK_ATTRIBUTE_OVERRIDES:
if (attr_val := getattr(task, attr, None)) is not None:
setattr(new_info, attr, attr_val)
# next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
# connection info object with 'magic' variables from the variable list.
# If the value 'ansible_delegated_vars' is in the variables, it means
# we have a delegated-to host, so we check there first before looking
# at the variables in general
if task.delegate_to is not None:
# In the case of a loop, the delegated_to host may have been
# templated based on the loop variable, so we try and locate
# the host name in the delegated variable dictionary here
delegated_host_name = templar.template(task.delegate_to)
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
delegated_transport = C.DEFAULT_TRANSPORT
for transport_var in C.MAGIC_VARIABLE_MAPPING.get('connection'):
if transport_var in delegated_vars:
delegated_transport = delegated_vars[transport_var]
break
# make sure this delegated_to host has something set for its remote
# address, otherwise we default to connecting to it by name. This
# may happen when users put an IP entry into their inventory, or if
# they rely on DNS for a non-inventory hostname
for address_var in ('ansible_%s_host' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_addr'):
if address_var in delegated_vars:
break
else:
display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
delegated_vars['ansible_host'] = delegated_host_name
# reset the port back to the default if none was specified, to prevent
# the delegated host from inheriting the original host's setting
for port_var in ('ansible_%s_port' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('port'):
if port_var in delegated_vars:
break
else:
if delegated_transport == 'winrm':
delegated_vars['ansible_port'] = 5986
else:
delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
# and likewise for the remote user
for user_var in ('ansible_%s_user' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_user'):
if user_var in delegated_vars and delegated_vars[user_var]:
break
else:
delegated_vars['ansible_user'] = task.remote_user or self.remote_user
else:
delegated_vars = dict()
# setup shell
for exe_var in C.MAGIC_VARIABLE_MAPPING.get('executable'):
if exe_var in variables:
setattr(new_info, 'executable', variables.get(exe_var))
attrs_considered = []
for (attr, variable_names) in C.MAGIC_VARIABLE_MAPPING.items():
for variable_name in variable_names:
if attr in attrs_considered:
continue
# if this is a delegation task, ONLY use the delegated host's vars and avoid the vars of the host the task is delegated FOR
if task.delegate_to is not None:
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
setattr(new_info, attr, delegated_vars[variable_name])
attrs_considered.append(attr)
elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
attrs_considered.append(attr)
# no else, as no other vars should be considered
# become legacy updates -- from inventory file (inventory overrides
# commandline)
for become_pass_name in C.MAGIC_VARIABLE_MAPPING.get('become_pass'):
if become_pass_name in variables:
break
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# special overrides for the connection setting
if len(delegated_vars) > 0:
# in the event that we were using local before make sure to reset the
# connection type to the default transport for the delegated-to host,
# if not otherwise specified
for connection_type in C.MAGIC_VARIABLE_MAPPING.get('connection'):
if connection_type in delegated_vars:
break
else:
remote_addr_local = new_info.remote_addr in C.LOCALHOST
inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
if remote_addr_local and inv_hostname_local:
setattr(new_info, 'connection', 'local')
elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)
# we store the original in 'connection_user' for use by network/other modules that fall back to it as the login user
# connection_user is to be deprecated once connection=local is removed, as local resets remote_user
if new_info.connection == 'local':
if not new_info.connection_user:
new_info.connection_user = new_info.remote_user
# for the case in which a connection plugin still uses pc.remote_addr and its own options
# specify 'default: inventory_hostname', but the value was never added to vars:
if new_info.remote_addr == 'inventory_hostname':
new_info.remote_addr = variables.get('inventory_hostname')
display.warning('The "%s" connection plugin has an improperly configured remote target value, '
'forcing "inventory_hostname" templated value instead of the string' % new_info.connection)
if task.check_mode is not None:
new_info.check_mode = task.check_mode
if task.diff is not None:
new_info.diff = task.diff
return new_info
def set_become_plugin(self, plugin):
self._become_plugin = plugin
def update_vars(self, variables):
"""
Adds 'magic' variables relating to connections to the variable dictionary provided.
In case users need to access from the play, this is a legacy from runner.
"""
for prop, var_list in C.MAGIC_VARIABLE_MAPPING.items():
try:
if 'become' in prop:
continue
var_val = getattr(self, prop)
for var_opt in var_list:
if var_opt not in variables and var_val is not None:
variables[var_opt] = var_val
except AttributeError:
continue
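# Illustrative sketch (assumed values, not part of the original source):
# update_vars only fills in magic variables that are absent and skips the
# become-related ones:
#   pc = PlayContext()
#   pc.remote_user = 'deploy'
#   hostvars = {'ansible_port': 2222}
#   pc.update_vars(hostvars)
#   # hostvars now also maps 'ansible_user' and 'ansible_ssh_user' to 'deploy';
#   # the pre-existing 'ansible_port' entry is left untouched.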
| 13,846 | Python | .py | 269 | 41.561338 | 154 | 0.649379 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,076 | conditional.py | ansible_ansible/lib/ansible/playbook/conditional.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import typing as t
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.common.text.converters import to_native
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
from ansible.utils.display import Display
display = Display()
class Conditional:
"""
This is a mix-in class, to be used with Base to allow the object
to be run conditionally when a condition is met or skipped.
"""
when = FieldAttribute(isa='list', default=list, extend=True, prepend=True)
def __init__(self, loader=None):
# when used directly, this class needs a loader, but we want to
# make sure we don't trample on the existing one if this class
# is used as a mix-in with a playbook base class
if not hasattr(self, '_loader'):
if loader is None:
raise AnsibleError("a loader must be specified when using Conditional() directly")
else:
self._loader = loader
super().__init__()
def _validate_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [value])
def evaluate_conditional(self, templar: Templar, all_vars: dict[str, t.Any]) -> bool:
"""
Loops through the conditionals set on this object, returning
False if any of them evaluate to false.
"""
return self.evaluate_conditional_with_result(templar, all_vars)[0]
def evaluate_conditional_with_result(self, templar: Templar, all_vars: dict[str, t.Any]) -> tuple[bool, t.Optional[str]]:
"""Loops through the conditionals set on this object, returning
False if any of them evaluate as such as well as the condition
that was false.
"""
for conditional in self.when:
if conditional is None or conditional == "":
res = True
elif isinstance(conditional, bool):
res = conditional
else:
try:
res = self._check_conditional(conditional, templar, all_vars)
except AnsibleError as e:
raise AnsibleError(
"The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)),
obj=getattr(self, '_ds', None)
)
display.debug("Evaluated conditional (%s): %s" % (conditional, res))
if not res:
return res, conditional
return True, None
def _check_conditional(self, conditional: str, templar: Templar, all_vars: dict[str, t.Any]) -> bool:
original = conditional
templar.available_variables = all_vars
try:
if templar.is_template(conditional):
display.warning(
"conditional statements should not include jinja2 "
"templating delimiters such as {{ }} or {%% %%}. "
"Found: %s" % conditional
)
conditional = templar.template(conditional)
if isinstance(conditional, bool):
return conditional
elif conditional == "":
return False
# If the result of the first-pass template render (to resolve inline templates) is marked unsafe,
# explicitly disable lookups on the final pass to prevent evaluation of untrusted content in the
# constructed template.
disable_lookups = hasattr(conditional, '__UNSAFE__')
# NOTE The spaces around True and False are intentional to short-circuit literal_eval for
# jinja2_native=False and avoid its expensive calls.
return templar.template(
"{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional,
disable_lookups=disable_lookups).strip() == "True"
except AnsibleUndefinedVariable as e:
raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
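# Worked example of the final template pass (informal, not original code): a
# bare conditional such as "ansible_facts['os_family'] == 'Debian'" is wrapped
# as
#   {% if ansible_facts['os_family'] == 'Debian' %} True {% else %} False {% endif %}
# and the stripped render is compared against "True", so _check_conditional
# always returns a plain bool.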
| 4,863 | Python | .py | 97 | 40.268041 | 125 | 0.636 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,077 | delegatable.py | ansible_ansible/lib/ansible/playbook/delegatable.py |
# -*- coding: utf-8 -*-
# Copyright The Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.playbook.attribute import FieldAttribute
class Delegatable:
delegate_to = FieldAttribute(isa='string')
delegate_facts = FieldAttribute(isa='bool')
def _post_validate_delegate_to(self, attr, value, templar):
"""This method exists just to make it clear that ``Task.post_validate``
does not template this value, it is set via ``TaskExecutor._calculate_delegate_to``
"""
return value
| 625 | Python | .py | 13 | 43.384615 | 92 | 0.717105 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,078 | taggable.py | ansible_ansible/lib/ansible/playbook/taggable.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils.common.sentinel import Sentinel
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
def _flatten_tags(tags: list) -> list:
rv = set()
for tag in tags:
if isinstance(tag, list):
rv.update(tag)
else:
rv.add(tag)
return list(rv)
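# Illustrative examples (not part of the original source): nested lists are
# flattened and duplicates collapse, but ordering is not guaranteed because a
# set is used internally:
#   _flatten_tags(['web', ['db', 'web']])  # -> ['web', 'db'] in some order
#   _flatten_tags([])                      # -> []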
class Taggable:
untagged = frozenset(['untagged'])
tags = FieldAttribute(isa='list', default=list, listof=(string_types, int), extend=True)
def _load_tags(self, attr, ds):
if isinstance(ds, list):
return ds
elif isinstance(ds, string_types):
return [x.strip() for x in ds.split(',')]
else:
raise AnsibleError('tags must be specified as a list', obj=ds)
def evaluate_tags(self, only_tags, skip_tags, all_vars):
""" this checks if the current item should be executed depending on tag options """
if self.tags:
templar = Templar(loader=self._loader, variables=all_vars)
obj = self
while obj is not None:
if (_tags := getattr(obj, "_tags", Sentinel)) is not Sentinel:
obj._tags = _flatten_tags(templar.template(_tags))
obj = obj._parent
tags = set(self.tags)
else:
# this makes isdisjoint work for untagged
tags = self.untagged
should_run = True # default, tasks to run
if only_tags:
if 'always' in tags:
should_run = True
elif ('all' in only_tags and 'never' not in tags):
should_run = True
elif not tags.isdisjoint(only_tags):
should_run = True
elif 'tagged' in only_tags and tags != self.untagged and 'never' not in tags:
should_run = True
else:
should_run = False
if should_run and skip_tags:
# Check for tags that we need to skip
if 'all' in skip_tags:
if 'always' not in tags or 'always' in skip_tags:
should_run = False
elif not tags.isdisjoint(skip_tags):
should_run = False
elif 'tagged' in skip_tags and tags != self.untagged:
should_run = False
return should_run
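# Tag evaluation sketch (assumed task object, not original code), for a task
# tagged ['deploy']:
#   task.evaluate_tags(only_tags={'deploy'}, skip_tags=set(), all_vars={})    # True
#   task.evaluate_tags(only_tags={'all'}, skip_tags={'deploy'}, all_vars={})  # False
#   # an untagged task runs under only_tags={'all'} but not only_tags={'tagged'}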
| 3,179 | Python | .py | 75 | 33.666667 | 92 | 0.627388 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,079 | block.py | ansible_ansible/lib/ansible/playbook/block.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import ansible.constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.sentinel import Sentinel
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.delegatable import Delegatable
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.notifiable import Notifiable
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
class Block(Base, Conditional, CollectionSearch, Taggable, Notifiable, Delegatable):
# main block fields containing the task lists
block = NonInheritableFieldAttribute(isa='list', default=list)
rescue = NonInheritableFieldAttribute(isa='list', default=list)
always = NonInheritableFieldAttribute(isa='list', default=list)
# for future consideration? this would be functionally
# similar to the 'else' clause for exceptions
# otherwise = FieldAttribute(isa='list')
def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
self._play = play
self._role = role
self._parent = None
self._dep_chain = None
self._use_handlers = use_handlers
self._implicit = implicit
if task_include:
self._parent = task_include
elif parent_block:
self._parent = parent_block
super(Block, self).__init__()
def __repr__(self):
return "BLOCK(uuid=%s)(id=%s)(parent=%s)" % (self._uuid, id(self), self._parent)
def __eq__(self, other):
"""object comparison based on _uuid"""
return self._uuid == other._uuid
def __ne__(self, other):
"""object comparison based on _uuid"""
return self._uuid != other._uuid
def get_vars(self):
"""
Blocks do not store variables directly, however they may be a member
of a role or task include which does, so return those if present.
"""
all_vars = {}
if self._parent:
all_vars |= self._parent.get_vars()
all_vars |= self.vars.copy()
return all_vars
@staticmethod
def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
implicit = not Block.is_block(data)
b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
return b.load_data(data, variable_manager=variable_manager, loader=loader)
@staticmethod
def is_block(ds):
is_block = False
if isinstance(ds, dict):
for attr in ('block', 'rescue', 'always'):
if attr in ds:
is_block = True
break
return is_block
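# Illustrative examples (not part of the original source):
#   Block.is_block({'block': [], 'rescue': []})           # True
#   Block.is_block({'name': 't', 'debug': {'msg': 'x'}})  # False -> implicit block
#   Block.is_block(['t1', 't2'])                          # False (lists are wrapped by preprocess_data)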
def preprocess_data(self, ds):
"""
If a simple task is given, an implicit block for that single task
is created, which goes in the main portion of the block
"""
if not Block.is_block(ds):
if isinstance(ds, list):
return super(Block, self).preprocess_data(dict(block=ds))
else:
return super(Block, self).preprocess_data(dict(block=[ds]))
return super(Block, self).preprocess_data(ds)
def _load_block(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading a block", obj=self._ds, orig_exc=e)
def _load_rescue(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading rescue.", obj=self._ds, orig_exc=e)
def _load_always(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading always", obj=self._ds, orig_exc=e)
def _validate_always(self, attr, name, value):
if value and not self.block:
raise AnsibleParserError("'%s' keyword cannot be used without 'block'" % name, obj=self._ds)
_validate_rescue = _validate_always
def get_dep_chain(self):
if self._dep_chain is None:
if self._parent:
return self._parent.get_dep_chain()
else:
return None
else:
return self._dep_chain[:]
def copy(self, exclude_parent=False, exclude_tasks=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
new_task = task.copy(exclude_parent=True)
if task._parent:
new_task._parent = task._parent.copy(exclude_tasks=True)
if task._parent == new_block:
# If task._parent is the same as new_block, just replace it
new_task._parent = new_block
else:
                        # task may not be a direct child of new_block; search for the correct place to insert new_block
cur_obj = new_task._parent
while cur_obj._parent and cur_obj._parent != new_block:
cur_obj = cur_obj._parent
cur_obj._parent = new_block
else:
new_task._parent = new_block
new_task_list.append(new_task)
return new_task_list
new_me = super(Block, self).copy()
new_me._play = self._play
new_me._use_handlers = self._use_handlers
if self._dep_chain is not None:
new_me._dep_chain = self._dep_chain[:]
new_me._parent = None
if self._parent and not exclude_parent:
new_me._parent = self._parent.copy(exclude_tasks=True)
if not exclude_tasks:
new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)
new_me._role = None
if self._role:
new_me._role = self._role
new_me.validate()
return new_me
def serialize(self):
"""
        Override of the default serialize method, since when we're serializing
        a block we don't want to include the attribute list of tasks.
"""
data = dict()
for attr in self.fattributes:
if attr not in ('block', 'rescue', 'always'):
data[attr] = getattr(self, attr)
data['dep_chain'] = self.get_dep_chain()
if self._role is not None:
data['role'] = self._role.serialize()
if self._parent is not None:
data['parent'] = self._parent.copy(exclude_tasks=True).serialize()
data['parent_type'] = self._parent.__class__.__name__
return data
def deserialize(self, data):
"""
Override of the default deserialize method, to match the above overridden
serialize method
"""
# import is here to avoid import loops
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler_task_include import HandlerTaskInclude
# we don't want the full set of attributes (the task lists), as that
# would lead to a serialize/deserialize loop
for attr in self.fattributes:
if attr in data and attr not in ('block', 'rescue', 'always'):
setattr(self, attr, data.get(attr))
self._dep_chain = data.get('dep_chain', None)
# if there was a serialized role, unpack it too
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
parent_data = data.get('parent')
if parent_data:
parent_type = data.get('parent_type')
if parent_type == 'Block':
p = Block()
elif parent_type == 'TaskInclude':
p = TaskInclude()
elif parent_type == 'HandlerTaskInclude':
p = HandlerTaskInclude()
p.deserialize(parent_data)
self._parent = p
self._dep_chain = self._parent.get_dep_chain()
def set_loader(self, loader):
self._loader = loader
if self._parent:
self._parent.set_loader(loader)
elif self._role:
self._role.set_loader(loader)
dep_chain = self.get_dep_chain()
if dep_chain:
for dep in dep_chain:
dep.set_loader(loader)
def _get_parent_attribute(self, attr, omit=False):
"""
Generic logic to get the attribute or parent attribute for a block value.
"""
fattr = self.fattributes[attr]
extend = fattr.extend
prepend = fattr.prepend
try:
# omit self, and only get parent values
if omit:
value = Sentinel
else:
value = getattr(self, f'_{attr}', Sentinel)
# If parent is static, we can grab attrs from the parent
# otherwise, defer to the grandparent
if getattr(self._parent, 'statically_loaded', True):
_parent = self._parent
else:
_parent = self._parent._parent
if _parent and (value is Sentinel or extend):
try:
if getattr(_parent, 'statically_loaded', True):
if hasattr(_parent, '_get_parent_attribute'):
parent_value = _parent._get_parent_attribute(attr)
else:
parent_value = getattr(_parent, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
except AttributeError:
pass
if self._role and (value is Sentinel or extend):
try:
parent_value = getattr(self._role, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
dep_chain = self.get_dep_chain()
if dep_chain and (value is Sentinel or extend):
dep_chain.reverse()
for dep in dep_chain:
dep_value = getattr(dep, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, dep_value, prepend)
else:
value = dep_value
if value is not Sentinel and not extend:
break
except AttributeError:
pass
if self._play and (value is Sentinel or extend):
try:
play_value = getattr(self._play, f'_{attr}', Sentinel)
if play_value is not Sentinel:
if extend:
value = self._extend_value(value, play_value, prepend)
else:
value = play_value
except AttributeError:
pass
except KeyError:
pass
return value
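    # Resolution order used by _get_parent_attribute() above: self, then the
    # parent (or grandparent when the parent was dynamically included), then
    # the role and its reversed dep chain, then the play. Attributes declared
    # with extend=True accumulate across all of these instead of stopping at
    # the first value found.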
def filter_tagged_tasks(self, all_vars):
"""
Creates a new block, with task lists filtered based on the tags.
"""
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
if isinstance(task, Block):
filtered_block = evaluate_block(task)
if filtered_block.has_tasks():
tmp_list.append(filtered_block)
elif ((task.action in C._ACTION_META and task.implicit) or
task.evaluate_tags(self._play.only_tags, self._play.skip_tags, all_vars=all_vars)):
tmp_list.append(task)
return tmp_list
def evaluate_block(block):
new_block = block.copy(exclude_parent=True, exclude_tasks=True)
new_block._parent = block._parent
new_block.block = evaluate_and_append_task(block.block)
new_block.rescue = evaluate_and_append_task(block.rescue)
new_block.always = evaluate_and_append_task(block.always)
return new_block
return evaluate_block(self)
def get_tasks(self):
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
if isinstance(task, Block):
tmp_list.extend(evaluate_block(task))
else:
tmp_list.append(task)
return tmp_list
def evaluate_block(block):
rv = evaluate_and_append_task(block.block)
rv.extend(evaluate_and_append_task(block.rescue))
rv.extend(evaluate_and_append_task(block.always))
return rv
return evaluate_block(self)
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
def get_include_params(self):
if self._parent:
return self._parent.get_include_params()
else:
return dict()
def all_parents_static(self):
"""
        Determine whether all of the parents of this block were statically
        loaded. Since Task/TaskInclude objects may be in the chain, they simply
        call their parent's all_parents_static() method. Only Block objects in
        the chain check the statically_loaded value of the parent.
"""
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude) and not self._parent.statically_loaded:
return False
return self._parent.all_parents_static()
return True
def get_first_parent_include(self):
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude):
return self._parent
return self._parent.get_first_parent_include()
return None
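# A minimal sketch of the implicit-block wrapping above, assuming ansible is
# importable: Block.is_block() only looks for a 'block', 'rescue' or 'always'
# key, and preprocess_data() wraps everything else into an implicit block.
if __name__ == '__main__':
    assert Block.is_block({'block': [{'ping': None}]})   # explicit block
    assert not Block.is_block({'ping': None})            # bare task -> implicit block
    assert not Block.is_block([{'ping': None}])          # task list -> wrapped as block=[...]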
| 16,529 | Python | .py | 371 | 32.067385 | 139 | 0.573 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,080 | attribute.py | ansible_ansible/lib/ansible/playbook/attribute.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.module_utils.common.sentinel import Sentinel
_CONTAINERS = frozenset(('list', 'dict', 'set'))
class Attribute:
def __init__(
self,
isa=None,
private=False,
default=None,
required=False,
listof=None,
priority=0,
class_type=None,
always_post_validate=False,
alias=None,
static=False,
):
"""
:class:`Attribute` specifies constraints for attributes of objects which
derive from playbook data. The attributes of the object are basically
a schema for the yaml playbook.
:kwarg isa: The type of the attribute. Allowable values are a string
representation of any yaml basic datatype, python class, or percent.
(Enforced at post-validation time).
:kwarg private: Not used at runtime. The docs playbook keyword dumper uses it to determine
that a keyword should not be documented. mpdehaan had plans to remove attributes marked
private from the ds so they would not have been available at all.
:kwarg default: Default value if unspecified in the YAML document.
:kwarg required: Whether or not the YAML document must contain this field.
If the attribute is None when post-validated, an error will be raised.
:kwarg listof: If isa is set to "list", this can optionally be set to
ensure that all elements in the list are of the given type. Valid
values here are the same as those for isa.
:kwarg priority: The order in which the fields should be parsed. Generally
this does not need to be set, it is for rare situations where another
field depends on the fact that another field was parsed first.
:kwarg class_type: If isa is set to "class", this can be optionally set to
a class (not a string name). The YAML data for this field will be
passed to the __init__ method of that class during post validation and
the field will be an instance of that class.
:kwarg always_post_validate: Controls whether a field should be post
validated or not (default: False).
:kwarg alias: An alias to use for the attribute name, for situations where
the attribute name may conflict with a Python reserved word.
"""
self.isa = isa
self.private = private
self.default = default
self.required = required
self.listof = listof
self.priority = priority
self.class_type = class_type
self.always_post_validate = always_post_validate
self.alias = alias
self.static = static
if default is not None and self.isa in _CONTAINERS and not callable(default):
raise TypeError('defaults for FieldAttribute may not be mutable, please provide a callable instead')
def __set_name__(self, owner, name):
self.name = name
def __eq__(self, other):
return other.priority == self.priority
def __ne__(self, other):
return other.priority != self.priority
# NB: higher priority numbers sort first
def __lt__(self, other):
return other.priority < self.priority
def __gt__(self, other):
return other.priority > self.priority
def __le__(self, other):
return other.priority <= self.priority
def __ge__(self, other):
return other.priority >= self.priority
def __get__(self, obj, obj_type=None):
method = f'_get_attr_{self.name}'
if hasattr(obj, method):
            # NOTE: this appears to be unused in the codebase;
            # _get_attr_connection has been replaced by ConnectionFieldAttribute.
            # Leaving it here so test_attr_method from
            # test/units/playbook/test_base.py passes, and for backwards compat.
if getattr(obj, '_squashed', False):
value = getattr(obj, f'_{self.name}', Sentinel)
else:
value = getattr(obj, method)()
else:
value = getattr(obj, f'_{self.name}', Sentinel)
if value is Sentinel:
value = self.default
if callable(value):
value = value()
setattr(obj, f'_{self.name}', value)
return value
def __set__(self, obj, value):
setattr(obj, f'_{self.name}', value)
if self.alias is not None:
setattr(obj, f'_{self.alias}', value)
    # NOTE: this appears to be unneeded in the codebase; leaving it here so
    # test_attr_int_del from test/units/playbook/test_base.py passes.
def __delete__(self, obj):
delattr(obj, f'_{self.name}')
class NonInheritableFieldAttribute(Attribute):
...
class FieldAttribute(Attribute):
def __init__(self, extend=False, prepend=False, **kwargs):
super().__init__(**kwargs)
self.extend = extend
self.prepend = prepend
def __get__(self, obj, obj_type=None):
if getattr(obj, '_squashed', False) or getattr(obj, '_finalized', False):
value = getattr(obj, f'_{self.name}', Sentinel)
else:
try:
value = obj._get_parent_attribute(self.name)
except AttributeError:
method = f'_get_attr_{self.name}'
if hasattr(obj, method):
                    # NOTE: this appears to be unneeded in the codebase;
                    # _get_attr_connection has been replaced by ConnectionFieldAttribute.
                    # Leaving it here so test_attr_method from
                    # test/units/playbook/test_base.py passes, and for backwards compat.
if getattr(obj, '_squashed', False):
value = getattr(obj, f'_{self.name}', Sentinel)
else:
value = getattr(obj, method)()
else:
value = getattr(obj, f'_{self.name}', Sentinel)
if value is Sentinel:
value = self.default
if callable(value):
value = value()
return value
class ConnectionFieldAttribute(FieldAttribute):
def __get__(self, obj, obj_type=None):
from ansible.module_utils.compat.paramiko import paramiko
from ansible.utils.ssh_functions import check_for_controlpersist
value = super().__get__(obj, obj_type)
if value == 'smart':
value = 'ssh'
            # see if SSH can support ControlPersist; if not, use paramiko
if not check_for_controlpersist('ssh') and paramiko is not None:
value = "paramiko"
# if someone did `connection: persistent`, default it to using a persistent paramiko connection to avoid problems
elif value == 'persistent' and paramiko is not None:
value = 'paramiko'
return value
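# A minimal sketch of the inverted priority ordering defined above: sorted()
# yields higher-priority attributes first, which is what playbook objects rely
# on when parsing fields that depend on other fields being loaded first.
if __name__ == '__main__':
    high = Attribute(priority=100)
    low = Attribute(priority=0)
    assert sorted([low, high])[0] is high   # higher priority numbers sort first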
| 7,663 | Python | .py | 161 | 37.950311 | 121 | 0.63153 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,081 | role_include.py | ansible_ansible/lib/ansible/playbook/role_include.py |
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import ansible.constants as C
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.utils.display import Display
from ansible.module_utils.six import string_types
from ansible.template import Templar
__all__ = ['IncludeRole']
display = Display()
class IncludeRole(TaskInclude):
"""
    A role include is derived from a regular task include to handle the
    special circumstances related to `- include_role: ...` tasks.
"""
BASE = frozenset(('name', 'role')) # directly assigned
FROM_ARGS = frozenset(('tasks_from', 'vars_from', 'defaults_from', 'handlers_from')) # used to populate from dict in role
OTHER_ARGS = frozenset(('apply', 'public', 'allow_duplicates', 'rolespec_validate')) # assigned to matching property
VALID_ARGS = BASE | FROM_ARGS | OTHER_ARGS # all valid args
# =================================================================================
# ATTRIBUTES
public = NonInheritableFieldAttribute(isa='bool', default=None, private=False, always_post_validate=True)
# private as this is a 'module options' vs a task property
allow_duplicates = NonInheritableFieldAttribute(isa='bool', default=True, private=True, always_post_validate=True)
rolespec_validate = NonInheritableFieldAttribute(isa='bool', default=True, private=True, always_post_validate=True)
def __init__(self, block=None, role=None, task_include=None):
super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)
self._from_files = {}
self._parent_role = role
self._role_name = None
self._role_path = None
def get_name(self):
""" return the name of the task """
return self.name or "%s : %s" % (self.action, self._role_name)
def get_block_list(self, play=None, variable_manager=None, loader=None):
# only need play passed in when dynamic
if play is None:
myplay = self._parent._play
else:
myplay = play
ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader, collection_list=self.collections)
ri.vars |= self.vars
if variable_manager is not None:
available_variables = variable_manager.get_vars(play=myplay, task=self)
else:
available_variables = {}
templar = Templar(loader=loader, variables=available_variables)
from_files = templar.template(self._from_files)
# build role
actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=from_files,
from_include=True, validate=self.rolespec_validate, public=self.public, static=self.statically_loaded)
actual_role._metadata.allow_duplicates = self.allow_duplicates
# add role to play
myplay.roles.append(actual_role)
# save this for later use
self._role_path = actual_role._role_path
# compile role with parent roles as dependencies to ensure they inherit
# variables
dep_chain = actual_role.get_dep_chain()
p_block = self.build_parent_block()
# collections value is not inherited; override with the value we calculated during role setup
p_block.collections = actual_role.collections
blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
for b in blocks:
b._parent = p_block
# HACK: parent inheritance doesn't seem to have a way to handle this intermediate override until squashed/finalized
b.collections = actual_role.collections
        # update the available handlers in the play
handlers = actual_role.get_handler_blocks(play=myplay, dep_chain=dep_chain)
for h in handlers:
h._parent = p_block
myplay.handlers = myplay.handlers + handlers
return blocks, handlers
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)
# Validate options
my_arg_names = frozenset(ir.args.keys())
# name is needed, or use role as alias
ir._role_name = ir.args.get('name', ir.args.get('role'))
if ir._role_name is None:
raise AnsibleParserError("'name' is a required field for %s." % ir.action, obj=data)
# validate bad args, otherwise we silently ignore
bad_opts = my_arg_names.difference(IncludeRole.VALID_ARGS)
if bad_opts:
raise AnsibleParserError('Invalid options for %s: %s' % (ir.action, ','.join(list(bad_opts))), obj=data)
# build options for role include/import tasks
for key in my_arg_names.intersection(IncludeRole.FROM_ARGS):
from_key = key.removesuffix('_from')
args_value = ir.args.get(key)
if not isinstance(args_value, string_types):
raise AnsibleParserError('Expected a string for %s but got %s instead' % (key, type(args_value)))
ir._from_files[from_key] = args_value
# apply is only valid for includes, not imports as they inherit directly
apply_attrs = ir.args.get('apply', {})
if apply_attrs and ir.action not in C._ACTION_INCLUDE_ROLE:
raise AnsibleParserError('Invalid options for %s: apply' % ir.action, obj=data)
elif not isinstance(apply_attrs, dict):
raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)
# manual list as otherwise the options would set other task parameters we don't want.
for option in my_arg_names.intersection(IncludeRole.OTHER_ARGS):
setattr(ir, option, ir.args.get(option))
return ir
def copy(self, exclude_parent=False, exclude_tasks=False):
new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
new_me.statically_loaded = self.statically_loaded
new_me._from_files = self._from_files.copy()
new_me._parent_role = self._parent_role
new_me._role_name = self._role_name
new_me._role_path = self._role_path
return new_me
def get_include_params(self):
v = super(IncludeRole, self).get_include_params()
if self._parent_role:
v |= self._parent_role.get_role_params()
v.setdefault('ansible_parent_role_names', []).insert(0, self._parent_role.get_name())
v.setdefault('ansible_parent_role_paths', []).insert(0, self._parent_role._role_path)
return v
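# A minimal sketch of the FROM_ARGS handling in load() above: each '*_from'
# option is stored in _from_files under its stem.
if __name__ == '__main__':
    stems = {k.removesuffix('_from') for k in IncludeRole.FROM_ARGS}
    assert stems == {'tasks', 'vars', 'defaults', 'handlers'}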
| 7,575 | Python | .py | 135 | 48.274074 | 143 | 0.677476 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,082 | included_file.py | ansible_ansible/lib/ansible/playbook/included_file.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.task_executor import remove_omit
from ansible.module_utils.common.text.converters import to_text
from ansible.playbook.handler import Handler
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.template import Templar
from ansible.utils.display import Display
display = Display()
class IncludedFile:
def __init__(self, filename, args, vars, task, is_role=False):
self._filename = filename
self._args = args
self._vars = vars
self._task = task
self._hosts = []
self._is_role = is_role
self._results = []
def add_host(self, host):
if host not in self._hosts:
self._hosts.append(host)
return
raise ValueError()
def __eq__(self, other):
return (other._filename == self._filename and
other._args == self._args and
other._vars == self._vars and
other._task._uuid == self._task._uuid and
other._task._parent._uuid == self._task._parent._uuid)
def __repr__(self):
return "%s (args=%s vars=%s): %s" % (self._filename, self._args, self._vars, self._hosts)
@staticmethod
def process_include_results(results, iterator, loader, variable_manager):
included_files = []
task_vars_cache = {}
for res in results:
original_host = res._host
original_task = res._task
if original_task.action in C._ACTION_ALL_INCLUDES:
if original_task.loop:
if 'results' not in res._result:
continue
include_results = res._result['results']
else:
include_results = [res._result]
for include_result in include_results:
# if the task result was skipped or failed, continue
                    if ('skipped' in include_result and include_result['skipped']) or ('failed' in include_result and include_result['failed']):
continue
cache_key = (iterator._play, original_host, original_task)
try:
task_vars = task_vars_cache[cache_key]
except KeyError:
task_vars = task_vars_cache[cache_key] = variable_manager.get_vars(play=iterator._play, host=original_host, task=original_task)
include_args = include_result.pop('include_args', dict())
special_vars = {}
loop_var = include_result.get('ansible_loop_var', 'item')
index_var = include_result.get('ansible_index_var')
if loop_var in include_result:
task_vars[loop_var] = special_vars[loop_var] = include_result[loop_var]
task_vars['ansible_loop_var'] = special_vars['ansible_loop_var'] = loop_var
if index_var and index_var in include_result:
task_vars[index_var] = special_vars[index_var] = include_result[index_var]
task_vars['ansible_index_var'] = special_vars['ansible_index_var'] = index_var
if '_ansible_item_label' in include_result:
task_vars['_ansible_item_label'] = special_vars['_ansible_item_label'] = include_result['_ansible_item_label']
if 'ansible_loop' in include_result:
task_vars['ansible_loop'] = special_vars['ansible_loop'] = include_result['ansible_loop']
if original_task.no_log and '_ansible_no_log' not in include_args:
task_vars['_ansible_no_log'] = special_vars['_ansible_no_log'] = original_task.no_log
# get search path for this task to pass to lookup plugins that may be used in pathing to
# the included file
task_vars['ansible_search_path'] = original_task.get_search_path()
# ensure basedir is always in (dwim already searches here but we need to display it)
if loader.get_basedir() not in task_vars['ansible_search_path']:
task_vars['ansible_search_path'].append(loader.get_basedir())
templar = Templar(loader=loader, variables=task_vars)
if original_task.action in C._ACTION_INCLUDE_TASKS:
include_file = None
if original_task._parent:
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = original_task._parent
cumulative_path = None
while parent_include is not None:
if not isinstance(parent_include, TaskInclude):
parent_include = parent_include._parent
continue
if isinstance(parent_include, IncludeRole):
parent_include_dir = parent_include._role_path
else:
try:
parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
except AnsibleError as e:
parent_include_dir = ''
display.warning(
'Templating the path of the parent %s failed. The path to the '
'included file may not be found. '
'The error was: %s.' % (original_task.action, to_text(e))
)
if cumulative_path is not None and not os.path.isabs(cumulative_path):
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
else:
cumulative_path = parent_include_dir
include_target = templar.template(include_result['include'])
if original_task._role:
dirname = 'handlers' if isinstance(original_task, Handler) else 'tasks'
new_basedir = os.path.join(original_task._role._role_path, dirname, cumulative_path)
candidates = [
loader.path_dwim_relative(original_task._role._role_path, dirname, include_target, is_role=True),
loader.path_dwim_relative(new_basedir, dirname, include_target, is_role=True)
]
for include_file in candidates:
try:
# may throw OSError
os.stat(include_file)
# or select the task file if it exists
break
except OSError:
pass
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
if os.path.exists(include_file):
break
else:
parent_include = parent_include._parent
if include_file is None:
if original_task._role:
include_target = templar.template(include_result['include'])
include_file = loader.path_dwim_relative(
original_task._role._role_path,
'handlers' if isinstance(original_task, Handler) else 'tasks',
include_target,
is_role=True)
else:
include_file = loader.path_dwim(include_result['include'])
include_file = templar.template(include_file)
inc_file = IncludedFile(include_file, include_args, special_vars, original_task)
else:
# template the included role's name here
role_name = include_args.pop('name', include_args.pop('role', None))
if role_name is not None:
role_name = templar.template(role_name)
new_task = original_task.copy()
new_task.post_validate(templar=templar)
new_task._role_name = role_name
for from_arg in new_task.FROM_ARGS:
if from_arg in include_args:
from_key = from_arg.removesuffix('_from')
new_task._from_files[from_key] = templar.template(include_args.pop(from_arg))
omit_token = task_vars.get('omit')
if omit_token:
new_task._from_files = remove_omit(new_task._from_files, omit_token)
inc_file = IncludedFile(role_name, include_args, special_vars, new_task, is_role=True)
idx = 0
orig_inc_file = inc_file
                    while True:
try:
pos = included_files[idx:].index(orig_inc_file)
                            # pos is relative to idx since we sliced, so index the full list with idx + pos
inc_file = included_files[idx + pos]
except ValueError:
included_files.append(orig_inc_file)
inc_file = orig_inc_file
try:
inc_file.add_host(original_host)
inc_file._results.append(res)
except ValueError:
# The host already exists for this include, advance forward, this is a new include
idx += pos + 1
else:
break
return included_files
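# A minimal sketch of the de-duplication contract above, with SimpleNamespace
# standing in for a real task: equality is by filename, args, vars and
# task/parent UUIDs, and add_host() raises ValueError for a duplicate host so
# process_include_results() can advance to the next matching include.
if __name__ == '__main__':
    from types import SimpleNamespace
    task = SimpleNamespace(_uuid='t1', _parent=SimpleNamespace(_uuid='p1'))
    a = IncludedFile('tasks.yml', {}, {}, task)
    b = IncludedFile('tasks.yml', {}, {}, task)
    assert a == b
    a.add_host('host1')
    try:
        a.add_host('host1')
    except ValueError:
        pass    # duplicate host rejected, as expected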
| 11,735 | Python | .py | 193 | 38.559585 | 151 | 0.501043 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,083 | __init__.py | ansible_ansible/lib/ansible/playbook/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
display = Display()
__all__ = ['Playbook']
class Playbook:
def __init__(self, loader):
# Entries in the datastructure of a playbook may
# be either a play or an include statement
self._entries = []
self._basedir = to_text(os.getcwd(), errors='surrogate_or_strict')
self._loader = loader
self._file_name = None
@staticmethod
def load(file_name, variable_manager=None, loader=None):
pb = Playbook(loader=loader)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
return pb
def _load_playbook_data(self, file_name, variable_manager, vars=None):
if os.path.isabs(file_name):
self._basedir = os.path.dirname(file_name)
else:
self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
# set the loaders basedir
cur_basedir = self._loader.get_basedir()
self._loader.set_basedir(self._basedir)
add_all_plugin_dirs(self._basedir)
self._file_name = file_name
try:
ds = self._loader.load_from_file(os.path.basename(file_name))
except UnicodeDecodeError as e:
raise AnsibleParserError("Could not read playbook (%s) due to encoding issues: %s" % (file_name, to_native(e)))
# check for errors and restore the basedir in case this error is caught and handled
if ds is None:
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("Empty playbook, nothing to do: %s" % unfrackpath(file_name), obj=ds)
elif not isinstance(ds, list):
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("A playbook must be a list of plays, got a %s instead: %s" % (type(ds), unfrackpath(file_name)), obj=ds)
elif not ds:
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("A playbook must contain at least one play: %s" % unfrackpath(file_name))
# Parse the playbook entries. For plays, we simply parse them
# using the Play() object, and includes are parsed using the
# PlaybookInclude() object
for entry in ds:
if not isinstance(entry, dict):
# restore the basedir in case this error is caught and handled
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("playbook entries must be either valid plays or 'import_playbook' statements", obj=entry)
if any(action in entry for action in C._ACTION_IMPORT_PLAYBOOK):
pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
if pb is not None:
self._entries.extend(pb._entries)
else:
which = entry
for k in C._ACTION_IMPORT_PLAYBOOK:
if k in entry:
which = entry[k]
break
display.display("skipping playbook '%s' due to conditional test failure" % which, color=C.COLOR_SKIP)
else:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader, vars=vars)
self._entries.append(entry_obj)
# we're done, so restore the old basedir in the loader
self._loader.set_basedir(cur_basedir)
def get_loader(self):
return self._loader
def get_plays(self):
return self._entries[:]
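# A minimal usage sketch; 'site.yml' is an illustrative file name and must
# exist relative to the current working directory for this to run.
if __name__ == '__main__':
    from ansible.parsing.dataloader import DataLoader
    pb = Playbook.load('site.yml', loader=DataLoader())
    for play in pb.get_plays():
        print(play)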
| 4,740 | Python | .py | 93 | 42.11828 | 141 | 0.664649 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,084 | collectionsearch.py | ansible_ansible/lib/ansible/playbook/collectionsearch.py |
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.template import is_template
from ansible.utils.display import Display
from jinja2.nativetypes import NativeEnvironment
display = Display()
def _ensure_default_collection(collection_list=None):
default_collection = AnsibleCollectionConfig.default_collection
# Will be None when used as the default
if collection_list is None:
collection_list = []
# FIXME: exclude role tasks?
if default_collection and default_collection not in collection_list:
collection_list.insert(0, default_collection)
# if there's something in the list, ensure that builtin or legacy is always there too
if collection_list and 'ansible.builtin' not in collection_list and 'ansible.legacy' not in collection_list:
collection_list.append('ansible.legacy')
return collection_list
class CollectionSearch:
# this needs to be populated before we can resolve tasks/roles/etc
collections = FieldAttribute(isa='list', listof=string_types, priority=100, default=_ensure_default_collection,
always_post_validate=True, static=True)
def _load_collections(self, attr, ds):
# We are always a mixin with Base, so we can validate this untemplated
# field early on to guarantee we are dealing with a list.
ds = self.get_validated_value('collections', self.fattributes.get('collections'), ds, None)
# this will only be called if someone specified a value; call the shared value
_ensure_default_collection(collection_list=ds)
if not ds: # don't return an empty collection list, just return None
return None
# This duplicates static attr checking logic from post_validate()
# because if the user attempts to template a collection name, it may
# error before it ever gets to the post_validate() warning (e.g. trying
# to import a role from the collection).
env = NativeEnvironment()
for collection_name in ds:
if is_template(collection_name, env):
display.warning('"collections" is not templatable, but we found: %s, '
'it will not be templated and will be used "as is".' % (collection_name))
return ds
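# A minimal sketch of _ensure_default_collection(), assuming no default
# collection is active (i.e. outside a collection build context):
if __name__ == '__main__':
    assert _ensure_default_collection(['my_ns.my_coll']) == ['my_ns.my_coll', 'ansible.legacy']
    assert _ensure_default_collection([]) == []   # an empty list stays empty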
| 2,601 | Python | .py | 44 | 51.409091 | 115 | 0.716818 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,085 | base.py | ansible_ansible/lib/ansible/playbook/base.py |
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import decimal
import itertools
import operator
import os
from copy import copy as shallowcopy
from functools import cache
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute, ConnectionFieldAttribute, NonInheritableFieldAttribute
from ansible.plugins.loader import module_loader, action_loader
from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars, isidentifier, get_unique_id
display = Display()
def _validate_action_group_metadata(action, found_group_metadata, fq_group_name):
valid_metadata = {
'extend_group': {
'types': (list, string_types,),
'errortype': 'list',
},
}
metadata_warnings = []
validate = C.VALIDATE_ACTION_GROUP_METADATA
metadata_only = isinstance(action, dict) and 'metadata' in action and len(action) == 1
if validate and not metadata_only:
found_keys = ', '.join(sorted(list(action)))
metadata_warnings.append("The only expected key is metadata, but got keys: {keys}".format(keys=found_keys))
elif validate:
if found_group_metadata:
metadata_warnings.append("The group contains multiple metadata entries.")
if not isinstance(action['metadata'], dict):
metadata_warnings.append("The metadata is not a dictionary. Got {metadata}".format(metadata=action['metadata']))
else:
unexpected_keys = set(action['metadata'].keys()) - set(valid_metadata.keys())
if unexpected_keys:
metadata_warnings.append("The metadata contains unexpected keys: {0}".format(', '.join(unexpected_keys)))
unexpected_types = []
for field, requirement in valid_metadata.items():
if field not in action['metadata']:
continue
value = action['metadata'][field]
if not isinstance(value, requirement['types']):
unexpected_types.append("%s is %s (expected type %s)" % (field, value, requirement['errortype']))
if unexpected_types:
metadata_warnings.append("The metadata contains unexpected key types: {0}".format(', '.join(unexpected_types)))
if metadata_warnings:
metadata_warnings.insert(0, "Invalid metadata was found for action_group {0} while loading module_defaults.".format(fq_group_name))
display.warning(" ".join(metadata_warnings))
class _ClassProperty:
def __set_name__(self, owner, name):
self.name = name
def __get__(self, obj, objtype=None):
return getattr(objtype, f'_{self.name}')()
class FieldAttributeBase:
fattributes = _ClassProperty()
@classmethod
@cache
def _fattributes(cls):
fattributes = {}
for class_obj in reversed(cls.__mro__):
for name, attr in list(class_obj.__dict__.items()):
if not isinstance(attr, Attribute):
continue
fattributes[name] = attr
if attr.alias:
setattr(class_obj, attr.alias, attr)
fattributes[attr.alias] = attr
return fattributes
def __init__(self):
# initialize the data loader and variable manager, which will be provided
# later when the object is actually loaded
self._loader = None
self._variable_manager = None
# other internal params
self._validated = False
self._squashed = False
self._finalized = False
# every object gets a random uuid:
self._uuid = get_unique_id()
# init vars, avoid using defaults in field declaration as it lives across plays
self.vars = dict()
@property
def finalized(self):
return self._finalized
def dump_me(self, depth=0):
""" this is never called from production code, it is here to be used when debugging as a 'complex print' """
if depth == 0:
display.debug("DUMPING OBJECT ------------------------------------------------------")
display.debug("%s- %s (%s, id=%s)" % (" " * depth, self.__class__.__name__, self, id(self)))
if hasattr(self, '_parent') and self._parent:
self._parent.dump_me(depth + 2)
dep_chain = self._parent.get_dep_chain()
if dep_chain:
for dep in dep_chain:
dep.dump_me(depth + 2)
if hasattr(self, '_play') and self._play:
self._play.dump_me(depth + 2)
def preprocess_data(self, ds):
""" infrequently used method to do some pre-processing of legacy terms """
return ds
def load_data(self, ds, variable_manager=None, loader=None):
""" walk the input datastructure and assign any values """
if ds is None:
raise AnsibleAssertionError('ds (%s) should not be None but it is.' % ds)
# cache the datastructure internally
setattr(self, '_ds', ds)
# the variable manager class is used to manage and merge variables
# down to a single dictionary for reference in templating, etc.
self._variable_manager = variable_manager
# the data loader class is used to parse data from strings and files
if loader is not None:
self._loader = loader
else:
self._loader = DataLoader()
# call the preprocess_data() function to massage the data into
# something we can more easily parse, and then call the validation
# function on it to ensure there are no incorrect key values
ds = self.preprocess_data(ds)
self._validate_attributes(ds)
# Walk all attributes in the class. We sort them based on their priority
# so that certain fields can be loaded before others, if they are dependent.
for name, attr in sorted(self.fattributes.items(), key=operator.itemgetter(1)):
# copy the value over unless a _load_field method is defined
if name in ds:
method = getattr(self, '_load_%s' % name, None)
if method:
setattr(self, name, method(name, ds[name]))
else:
setattr(self, name, ds[name])
# run early, non-critical validation
self.validate()
# return the constructed object
return self
def get_ds(self):
try:
return getattr(self, '_ds')
except AttributeError:
return None
def get_loader(self):
return self._loader
def get_variable_manager(self):
return self._variable_manager
def _post_validate_debugger(self, attr, value, templar):
value = templar.template(value)
valid_values = frozenset(('always', 'on_failed', 'on_unreachable', 'on_skipped', 'never'))
if value and isinstance(value, string_types) and value not in valid_values:
raise AnsibleParserError("'%s' is not a valid value for debugger. Must be one of %s" % (value, ', '.join(valid_values)), obj=self.get_ds())
return value
def _validate_attributes(self, ds):
"""
Ensures that there are no keys in the datastructure which do
not map to attributes for this object.
"""
valid_attrs = frozenset(self.fattributes)
for key in ds:
if key not in valid_attrs:
raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
def validate(self, all_vars=None):
""" validation that is done at parse time, not load time """
all_vars = {} if all_vars is None else all_vars
if not self._validated:
# walk all fields in the object
for (name, attribute) in self.fattributes.items():
# run validator only if present
method = getattr(self, '_validate_%s' % name, None)
if method:
method(attribute, name, getattr(self, name))
else:
# and make sure the attribute is of the type it should be
value = getattr(self, f'_{name}', Sentinel)
if value is not None:
if attribute.isa == 'string' and isinstance(value, (list, dict)):
raise AnsibleParserError(
"The field '%s' is supposed to be a string type,"
" however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds()
)
self._validated = True
def _load_module_defaults(self, name, value):
if value is None:
return
if not isinstance(value, list):
value = [value]
validated_module_defaults = []
for defaults_dict in value:
if not isinstance(defaults_dict, dict):
raise AnsibleParserError(
"The field 'module_defaults' is supposed to be a dictionary or list of dictionaries, "
"the keys of which must be static action, module, or group names. Only the values may contain "
"templates. For example: {'ping': \"{{ ping_defaults }}\"}"
)
validated_defaults_dict = {}
for defaults_entry, defaults in defaults_dict.items():
# module_defaults do not use the 'collections' keyword, so actions and
# action_groups that are not fully qualified are part of the 'ansible.legacy'
# collection. Update those entries here, so module_defaults contains
# fully qualified entries.
if defaults_entry.startswith('group/'):
group_name = defaults_entry.split('group/')[-1]
                    # The resolved action_groups cache is saved on the current Play
if self.play is not None:
group_name, dummy = self._resolve_group(group_name)
defaults_entry = 'group/' + group_name
validated_defaults_dict[defaults_entry] = defaults
else:
if len(defaults_entry.split('.')) < 3:
defaults_entry = 'ansible.legacy.' + defaults_entry
resolved_action = self._resolve_action(defaults_entry)
if resolved_action:
validated_defaults_dict[resolved_action] = defaults
# If the defaults_entry is an ansible.legacy plugin, these defaults
# are inheritable by the 'ansible.builtin' subset, but are not
# required to exist.
if defaults_entry.startswith('ansible.legacy.'):
resolved_action = self._resolve_action(
defaults_entry.replace('ansible.legacy.', 'ansible.builtin.'),
mandatory=False
)
if resolved_action:
validated_defaults_dict[resolved_action] = defaults
validated_module_defaults.append(validated_defaults_dict)
return validated_module_defaults
@property
def play(self):
if hasattr(self, '_play'):
play = self._play
elif hasattr(self, '_parent') and hasattr(self._parent, '_play'):
play = self._parent._play
else:
play = self
if play.__class__.__name__ != 'Play':
# Should never happen, but handle gracefully by returning None, just in case
return None
return play
def _resolve_group(self, fq_group_name, mandatory=True):
if not AnsibleCollectionRef.is_valid_fqcr(fq_group_name):
collection_name = 'ansible.builtin'
fq_group_name = collection_name + '.' + fq_group_name
else:
collection_name = '.'.join(fq_group_name.split('.')[0:2])
# Check if the group has already been resolved and cached
if fq_group_name in self.play._group_actions:
return fq_group_name, self.play._group_actions[fq_group_name]
try:
action_groups = _get_collection_metadata(collection_name).get('action_groups', {})
except ValueError:
if not mandatory:
display.vvvvv("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
return fq_group_name, []
raise AnsibleParserError("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
# The collection may or may not use the fully qualified name
# Don't fail if the group doesn't exist in the collection
resource_name = fq_group_name.split(collection_name + '.')[-1]
action_group = action_groups.get(
fq_group_name,
action_groups.get(resource_name)
)
if action_group is None:
if not mandatory:
display.vvvvv("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
return fq_group_name, []
raise AnsibleParserError("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
resolved_actions = []
include_groups = []
found_group_metadata = False
for action in action_group:
# Everything should be a string except the metadata entry
if not isinstance(action, string_types):
_validate_action_group_metadata(action, found_group_metadata, fq_group_name)
if isinstance(action['metadata'], dict):
found_group_metadata = True
include_groups = action['metadata'].get('extend_group', [])
if isinstance(include_groups, string_types):
include_groups = [include_groups]
if not isinstance(include_groups, list):
                        # Bad entries may have produced a warning above; reset to the acceptable type to prevent tracebacks.
include_groups = []
continue
# The collection may or may not use the fully qualified name.
# If not, it's part of the current collection.
if not AnsibleCollectionRef.is_valid_fqcr(action):
action = collection_name + '.' + action
resolved_action = self._resolve_action(action, mandatory=False)
if resolved_action:
resolved_actions.append(resolved_action)
for action in resolved_actions:
if action not in self.play._action_groups:
self.play._action_groups[action] = []
self.play._action_groups[action].append(fq_group_name)
self.play._group_actions[fq_group_name] = resolved_actions
# Resolve extended groups last, after caching the group in case they recursively refer to each other
for include_group in include_groups:
if not AnsibleCollectionRef.is_valid_fqcr(include_group):
include_group = collection_name + '.' + include_group
dummy, group_actions = self._resolve_group(include_group, mandatory=False)
for action in group_actions:
if action not in self.play._action_groups:
self.play._action_groups[action] = []
self.play._action_groups[action].append(fq_group_name)
self.play._group_actions[fq_group_name].extend(group_actions)
resolved_actions.extend(group_actions)
return fq_group_name, resolved_actions
def _resolve_action(self, action_name, mandatory=True):
context = module_loader.find_plugin_with_context(action_name, ignore_deprecated=(not mandatory))
if context.resolved and not context.action_plugin:
prefer = action_loader.find_plugin_with_context(action_name, ignore_deprecated=(not mandatory))
if prefer.resolved:
context = prefer
elif not context.resolved:
context = action_loader.find_plugin_with_context(action_name, ignore_deprecated=(not mandatory))
if context.resolved:
return context.resolved_fqcn
if mandatory:
raise AnsibleParserError("Could not resolve action %s in module_defaults" % action_name)
display.vvvvv("Could not resolve action %s in module_defaults" % action_name)
def squash(self):
"""
Evaluates all attributes and sets them to the evaluated version,
so that all future accesses of attributes do not need to evaluate
parent attributes.
"""
if not self._squashed:
for name in self.fattributes:
setattr(self, name, getattr(self, name))
self._squashed = True
def copy(self):
"""
Create a copy of this object and return it.
"""
try:
new_me = self.__class__()
except RuntimeError as e:
raise AnsibleError("Exceeded maximum object depth. This may have been caused by excessive role recursion", orig_exc=e)
for name in self.fattributes:
setattr(new_me, name, shallowcopy(getattr(self, f'_{name}', Sentinel)))
new_me._loader = self._loader
new_me._variable_manager = self._variable_manager
new_me._validated = self._validated
new_me._finalized = self._finalized
new_me._uuid = self._uuid
# if the ds value was set on the object, copy it to the new copy too
if hasattr(self, '_ds'):
new_me._ds = self._ds
return new_me
def get_validated_value(self, name, attribute, value, templar):
if attribute.isa == 'string':
value = to_text(value)
elif attribute.isa == 'int':
if not isinstance(value, int):
try:
if (decimal_value := decimal.Decimal(value)) != (int_value := int(decimal_value)):
raise decimal.DecimalException(f'Floating-point value {value!r} would be truncated.')
value = int_value
except decimal.DecimalException as e:
raise ValueError from e
elif attribute.isa == 'float':
value = float(value)
elif attribute.isa == 'bool':
value = boolean(value, strict=True)
elif attribute.isa == 'percent':
# special value, which may be an integer or float
# with an optional '%' at the end
if isinstance(value, string_types) and '%' in value:
value = value.replace('%', '')
value = float(value)
elif attribute.isa == 'list':
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
if attribute.listof is not None:
for item in value:
if not isinstance(item, attribute.listof):
raise AnsibleParserError("the field '%s' should be a list of %s, "
"but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
elif attribute.required and attribute.listof == string_types:
if item is None or item.strip() == "":
raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
elif attribute.isa == 'set':
if value is None:
value = set()
elif not isinstance(value, (list, set)):
if isinstance(value, string_types):
value = value.split(',')
else:
# Making a list like this handles strings of
# text and bytes properly
value = [value]
if not isinstance(value, set):
value = set(value)
elif attribute.isa == 'dict':
if value is None:
value = dict()
elif not isinstance(value, dict):
raise TypeError("%s is not a dictionary" % value)
elif attribute.isa == 'class':
if not isinstance(value, attribute.class_type):
raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
value.post_validate(templar=templar)
else:
raise AnsibleAssertionError(f"Unknown value for attribute.isa: {attribute.isa}")
return value
def set_to_context(self, name):
""" set to parent inherited value or Sentinel as appropriate"""
attribute = self.fattributes[name]
if isinstance(attribute, NonInheritableFieldAttribute):
# setting to sentinel will trigger 'default/default()' on getter
setattr(self, name, Sentinel)
else:
try:
setattr(self, name, self._get_parent_attribute(name, omit=True))
except AttributeError:
# mostly playcontext as only tasks/handlers/blocks really resolve parent
setattr(self, name, Sentinel)
def post_validate(self, templar):
"""
        We can't tell that everything is of the right type until we have
        all the variables. Run basic type checks (from isa) as well as
        any _post_validate_<foo> functions.
"""
# save the omit value for later checking
omit_value = templar.available_variables.get('omit')
for (name, attribute) in self.fattributes.items():
if attribute.static:
value = getattr(self, name)
# we don't template 'vars' but allow template as values for later use
if name not in ('vars',) and templar.is_template(value):
display.warning('"%s" is not templatable, but we found: %s, '
'it will not be templated and will be used "as is".' % (name, value))
continue
if getattr(self, name) is None:
if not attribute.required:
continue
else:
raise AnsibleParserError("the field '%s' is required but was not set" % name)
elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
# Intermediate objects like Play() won't have their fields validated by
# default, as their values are often inherited by other objects and validated
# later, so we don't want them to fail out early
continue
try:
# Run the post-validator if present. These methods are responsible for
# using the given templar to template the values, if required.
method = getattr(self, '_post_validate_%s' % name, None)
if method:
value = method(attribute, getattr(self, name), templar)
elif attribute.isa == 'class':
value = getattr(self, name)
else:
# if the attribute contains a variable, template it now
value = templar.template(getattr(self, name))
# If this evaluated to the omit value, set the value back to inherited by context
# or default specified in the FieldAttribute and move on
if omit_value is not None and value == omit_value:
self.set_to_context(name)
continue
# and make sure the attribute is of the type it should be
if value is not None:
value = self.get_validated_value(name, attribute, value, templar)
# and assign the massaged value back to the attribute field
setattr(self, name, value)
except (TypeError, ValueError) as e:
value = getattr(self, name)
raise AnsibleParserError(f"the field '{name}' has an invalid value ({value!r}), and could not be converted to {attribute.isa}.",
obj=self.get_ds(), orig_exc=e)
except (AnsibleUndefinedVariable, UndefinedError) as e:
if templar._fail_on_undefined_errors and name != 'name':
if name == 'args':
msg = "The task includes an option with an undefined variable."
else:
msg = f"The field '{name}' has an invalid value, which includes an undefined variable."
raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)
self._finalized = True
def _load_vars(self, attr, ds):
"""
Vars in a play must be specified as a dictionary.
"""
def _validate_variable_keys(ds):
for key in ds:
if not isidentifier(key):
raise TypeError("'%s' is not a valid variable name" % key)
try:
if isinstance(ds, dict):
_validate_variable_keys(ds)
return combine_vars(self.vars, ds)
elif ds is None:
return {}
else:
raise ValueError
except ValueError as e:
raise AnsibleParserError("Vars in a %s must be specified as a dictionary" % self.__class__.__name__,
obj=ds, orig_exc=e)
except TypeError as e:
raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds, orig_exc=e)
def _extend_value(self, value, new_value, prepend=False):
"""
Will extend the value given with new_value (and will turn both
into lists if they are not so already). The values are run through
a set to remove duplicate values.
"""
if not isinstance(value, list):
value = [value]
if not isinstance(new_value, list):
new_value = [new_value]
# Due to where _extend_value may run for some attributes
# it is possible to end up with Sentinel in the list of values
# ensure we strip them
value = [v for v in value if v is not Sentinel]
new_value = [v for v in new_value if v is not Sentinel]
if prepend:
combined = new_value + value
else:
combined = value + new_value
return [i for i, dummy in itertools.groupby(combined) if i is not None]
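    # --- Editor's illustrative sketch (not upstream code) ---
    # A minimal standalone re-creation of the combine step above, assuming
    # plain Python lists rather than real FieldAttribute values. Note that
    # itertools.groupby() only collapses *adjacent* duplicates, so the
    # result is deduplicated pairwise, not globally:
    #
    #   >>> import itertools
    #   >>> combined = ['a'] + ['a', 'b', None]    # prepend=False
    #   >>> [i for i, _ in itertools.groupby(combined) if i is not None]
    #   ['a', 'b']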
def dump_attrs(self):
"""
Dumps all attributes to a dictionary
"""
attrs = {}
for (name, attribute) in self.fattributes.items():
attr = getattr(self, name)
if attribute.isa == 'class' and hasattr(attr, 'serialize'):
attrs[name] = attr.serialize()
else:
attrs[name] = attr
return attrs
def from_attrs(self, attrs):
"""
Loads attributes from a dictionary
"""
for (attr, value) in attrs.items():
if attr in self.fattributes:
attribute = self.fattributes[attr]
if attribute.isa == 'class' and isinstance(value, dict):
obj = attribute.class_type()
obj.deserialize(value)
setattr(self, attr, obj)
else:
setattr(self, attr, value)
# from_attrs is only used to create a finalized task
# from attrs from the Worker/TaskExecutor
# Those attrs are finalized and squashed in the TE
# and controller side use needs to reflect that
self._finalized = True
self._squashed = True
def serialize(self):
"""
Serializes the object derived from the base object into
a dictionary of values. This only serializes the field
attributes for the object, so this may need to be overridden
for any classes which wish to add additional items not stored
as field attributes.
"""
repr = self.dump_attrs()
# serialize the uuid field
repr['uuid'] = self._uuid
repr['finalized'] = self._finalized
repr['squashed'] = self._squashed
return repr
def deserialize(self, data):
"""
Given a dictionary of values, load up the field attributes for
this object. As with serialize(), if there are any non-field
attribute data members, this method will need to be overridden
and extended.
"""
if not isinstance(data, dict):
raise AnsibleAssertionError('data (%s) should be a dict but is a %s' % (data, type(data)))
for (name, attribute) in self.fattributes.items():
if name in data:
setattr(self, name, data[name])
else:
self.set_to_context(name)
# restore the UUID field
setattr(self, '_uuid', data.get('uuid'))
self._finalized = data.get('finalized', False)
self._squashed = data.get('squashed', False)
class Base(FieldAttributeBase):
name = NonInheritableFieldAttribute(isa='string', default='', always_post_validate=True)
# connection/transport
connection = ConnectionFieldAttribute(isa='string', default=context.cliargs_deferred_get('connection'))
port = FieldAttribute(isa='int')
remote_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('remote_user'))
# variables
vars = NonInheritableFieldAttribute(isa='dict', priority=100, static=True)
# module default params
module_defaults = FieldAttribute(isa='list', extend=True, prepend=True)
# flags and misc. settings
environment = FieldAttribute(isa='list', extend=True, prepend=True)
no_log = FieldAttribute(isa='bool', default=C.DEFAULT_NO_LOG)
run_once = FieldAttribute(isa='bool')
ignore_errors = FieldAttribute(isa='bool')
ignore_unreachable = FieldAttribute(isa='bool')
check_mode = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('check'))
diff = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('diff'))
any_errors_fatal = FieldAttribute(isa='bool', default=C.ANY_ERRORS_FATAL)
throttle = FieldAttribute(isa='int', default=0)
timeout = FieldAttribute(isa='int', default=C.TASK_TIMEOUT)
# explicitly invoke a debugger on tasks
debugger = FieldAttribute(isa='string')
# Privilege escalation
become = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('become'))
become_method = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_method'))
become_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_user'))
become_flags = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_flags'))
become_exe = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_exe'))
# used to hold sudo/su stuff
DEPRECATED_ATTRIBUTES = [] # type: list[str]
def get_path(self):
""" return the absolute path of the playbook object and its line number """
path = ""
try:
path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
except AttributeError:
try:
path = "%s:%s" % (self._parent._play._ds._data_source, self._parent._play._ds._line_number)
except AttributeError:
pass
return path
def get_dep_chain(self):
if hasattr(self, '_parent') and self._parent:
return self._parent.get_dep_chain()
else:
return None
def get_search_path(self):
"""
Return the list of paths you should search for files, in order.
This follows role/playbook dependency chain.
"""
path_stack = []
dep_chain = self.get_dep_chain()
# inside role: add the dependency chain from current to dependent
if dep_chain:
path_stack.extend(reversed([x._role_path for x in dep_chain if hasattr(x, '_role_path')]))
# add path of task itself, unless it is already in the list
task_dir = os.path.dirname(self.get_path())
if task_dir not in path_stack:
path_stack.append(task_dir)
return path_stack
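    # --- Editor's illustrative sketch (not upstream code) ---
    # get_search_path() yields the dependency-chain role paths first
    # (reversed), then the directory of the task file itself. With
    # hypothetical paths, for a task in role B that depends on role A:
    #
    #   >>> dep_chain_paths = ['/roles/A', '/roles/B']
    #   >>> path_stack = list(reversed(dep_chain_paths))
    #   >>> path_stack.append('/playbook/tasks')
    #   >>> path_stack
    #   ['/roles/B', '/roles/A', '/playbook/tasks']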
| 33,536 | Python | .py | 649 | 39.405239 | 151 | 0.60022 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,086 | task.py | ansible_ansible/lib/ansible/playbook/task.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.plugins.loader import lookup_loader
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
from ansible.playbook.delegatable import Delegatable
from ansible.playbook.loop_control import LoopControl
from ansible.playbook.notifiable import Notifiable
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.vars import isidentifier
__all__ = ['Task']
display = Display()
class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatable):
"""
A task is a language feature that represents a call to a module, with given arguments and other parameters.
A handler is a subclass of a task.
Usage:
Task.load(datastructure) -> Task
Task.something(...)
"""
# =================================================================================
# ATTRIBUTES
# load_<attribute_name> and
# validate_<attribute_name>
# will be used if defined
# might be possible to define others
# NOTE: ONLY set defaults on task attributes that are not inheritable,
# inheritance is only triggered if the 'current value' is Sentinel,
    # default can be set at play/top level object and inheritance will take its course.
args = NonInheritableFieldAttribute(isa='dict', default=dict)
action = NonInheritableFieldAttribute(isa='string')
async_val = NonInheritableFieldAttribute(isa='int', default=0, alias='async')
changed_when = NonInheritableFieldAttribute(isa='list', default=list)
delay = NonInheritableFieldAttribute(isa='float', default=5)
failed_when = NonInheritableFieldAttribute(isa='list', default=list)
loop = NonInheritableFieldAttribute(isa='list')
loop_control = NonInheritableFieldAttribute(isa='class', class_type=LoopControl, default=LoopControl)
poll = NonInheritableFieldAttribute(isa='int', default=C.DEFAULT_POLL_INTERVAL)
register = NonInheritableFieldAttribute(isa='string', static=True)
retries = NonInheritableFieldAttribute(isa='int') # default is set in TaskExecutor
until = NonInheritableFieldAttribute(isa='list', default=list)
# deprecated, used to be loop and loop_args but loop has been repurposed
loop_with = NonInheritableFieldAttribute(isa='string', private=True)
def __init__(self, block=None, role=None, task_include=None):
""" constructors a task, without the Task.load classmethod, it will be pretty blank """
self._role = role
self._parent = None
self.implicit = False
self.resolved_action = None
if task_include:
self._parent = task_include
else:
self._parent = block
super(Task, self).__init__()
def get_name(self, include_role_fqcn=True):
""" return the name of the task """
if self._role:
role_name = self._role.get_name(include_role_fqcn=include_role_fqcn)
if self._role and self.name:
return "%s : %s" % (role_name, self.name)
elif self.name:
return self.name
else:
if self._role:
return "%s : %s" % (role_name, self.action)
else:
return "%s" % (self.action,)
def _merge_kv(self, ds):
if ds is None:
return ""
elif isinstance(ds, string_types):
return ds
elif isinstance(ds, dict):
buf = ""
for (k, v) in ds.items():
if k.startswith('_'):
continue
buf = buf + "%s=%s " % (k, v)
buf = buf.strip()
return buf
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = Task(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def __repr__(self):
""" returns a human-readable representation of the task """
if self.action in C._ACTION_META:
return "TASK: meta (%s)" % self.args['_raw_params']
else:
return "TASK: %s" % self.get_name()
def _preprocess_with_loop(self, ds, new_ds, k, v):
""" take a lookup plugin name and store it correctly """
loop_name = k.removeprefix("with_")
if new_ds.get('loop') is not None or new_ds.get('loop_with') is not None:
raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
if v is None:
raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
new_ds['loop_with'] = loop_name
new_ds['loop'] = v
# display.deprecated("with_ type loops are being phased out, use the 'loop' keyword instead",
# version="2.10", collection_name='ansible.builtin')
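    # --- Editor's illustrative sketch (not upstream code) ---
    # Given a hypothetical parsed task dict, the with_* rewrite above
    # stores the lookup plugin name in 'loop_with' and the values in 'loop':
    #
    #   >>> ds = {'name': 'demo', 'with_items': ['a', 'b']}
    #   >>> new_ds = {}
    #   >>> # after self._preprocess_with_loop(ds, new_ds, 'with_items', ['a', 'b']):
    #   >>> # new_ds == {'loop_with': 'items', 'loop': ['a', 'b']}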
def preprocess_data(self, ds):
"""
        Tasks are especially complex argument structures, so they need pre-processing.
        Keep it short.
"""
if not isinstance(ds, dict):
raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
# the new, cleaned datastructure, which will have legacy
# items reduced to a standard structure suitable for the
# attributes of the task class
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
# since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
default_collection = AnsibleCollectionConfig.default_collection
collections_list = ds.get('collections')
if collections_list is None:
# use the parent value if our ds doesn't define it
collections_list = self.collections
else:
# Validate this untemplated field early on to guarantee we are dealing with a list.
# This is also done in CollectionSearch._load_collections() but this runs before that call.
collections_list = self.get_validated_value('collections', self.fattributes.get('collections'), collections_list, None)
if default_collection and not self._role: # FIXME: and not a collections role
if collections_list:
if default_collection not in collections_list:
collections_list.insert(0, default_collection)
else:
collections_list = [default_collection]
if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not in collections_list:
collections_list.append('ansible.legacy')
if collections_list:
ds['collections'] = collections_list
# use the args parsing class to determine the action, args,
# and the delegate_to value from the various possible forms
# supported as legacy
args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)
try:
(action, args, delegate_to) = args_parser.parse()
except AnsibleParserError as e:
            # if the raised exception was created with obj=ds args, then it includes the detail,
            # so we don't need to add it and can just re-raise.
if e.obj:
raise
# But if it wasn't, we can add the yaml object now to get more detail
raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)
else:
# Set the resolved action plugin (or if it does not exist, module) for callbacks.
self.resolved_action = args_parser.resolved_action
# the command/shell/script modules used to support the `cmd` arg,
# which corresponds to what we now call _raw_params, so move that
# value over to _raw_params (assuming it is empty)
if action in C._ACTION_HAS_CMD:
if 'cmd' in args:
if args.get('_raw_params', '') != '':
raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
" Please put everything in one or the other place.", obj=ds)
args['_raw_params'] = args.pop('cmd')
new_ds['action'] = action
new_ds['args'] = args
new_ds['delegate_to'] = delegate_to
# we handle any 'vars' specified in the ds here, as we may
# be adding things to them below (special handling for includes).
# When that deprecated feature is removed, this can be too.
if 'vars' in ds:
# _load_vars is defined in Base, and is used to load a dictionary
# or list of dictionaries in a standard way
new_ds['vars'] = self._load_vars(None, ds.get('vars'))
else:
new_ds['vars'] = dict()
for (k, v) in ds.items():
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
# we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
continue
elif k.startswith('with_') and k.removeprefix("with_") in lookup_loader:
# transform into loop property
self._preprocess_with_loop(ds, new_ds, k, v)
elif C.INVALID_TASK_ATTRIBUTE_FAILED or k in self.fattributes:
new_ds[k] = v
else:
display.warning("Ignoring invalid attribute: %s" % k)
return super(Task, self).preprocess_data(new_ds)
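    # --- Editor's illustrative sketch (not upstream code) ---
    # Roughly, preprocess_data() turns a free-form task entry into the
    # canonical action/args/delegate_to shape. With a hypothetical input:
    #
    #   in:  {'name': 'demo', 'copy': 'src=a dest=b'}
    #   out: {'name': 'demo', 'action': 'copy',
    #         'args': {'src': 'a', 'dest': 'b'},
    #         'delegate_to': <as parsed, often unset>, 'vars': {}}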
def _load_loop_control(self, attr, ds):
if not isinstance(ds, dict):
raise AnsibleParserError(
"the `loop_control` value must be specified as a dictionary and cannot "
"be a variable itself (though it can contain variables)",
obj=ds,
)
return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader)
def _validate_attributes(self, ds):
try:
super(Task, self)._validate_attributes(ds)
except AnsibleParserError as e:
e.message += '\nThis error can be suppressed as a warning using the "invalid_task_attribute_failed" configuration'
raise e
def _validate_changed_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [value])
def _validate_failed_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [value])
def _validate_register(self, attr, name, value):
if value is not None and not isidentifier(value):
raise AnsibleParserError(f"Invalid variable name in 'register' specified: '{value}'")
def post_validate(self, templar):
"""
Override of base class post_validate, to also do final validation on
the block and task include (if any) to which this task belongs.
"""
if self._parent:
self._parent.post_validate(templar)
if AnsibleCollectionConfig.default_collection:
pass
super(Task, self).post_validate(templar)
def _post_validate_loop(self, attr, value, templar):
"""
Override post validation for the loop field, which is templated
specially in the TaskExecutor class when evaluating loops.
"""
return value
def _post_validate_environment(self, attr, value, templar):
"""
Override post validation of vars on the play, as we don't want to
template these too early.
"""
env = {}
if value is not None:
def _parse_env_kv(k, v):
try:
env[k] = templar.template(v, convert_bare=False)
except AnsibleUndefinedVariable as e:
error = to_native(e)
                    if self.action in C._ACTION_FACT_GATHERING and ('ansible_facts.env' in error or 'ansible_env' in error):
# ignore as fact gathering is required for 'env' facts
return
raise
if isinstance(value, list):
for env_item in value:
if isinstance(env_item, dict):
for k in env_item:
_parse_env_kv(k, env_item[k])
else:
isdict = templar.template(env_item, convert_bare=False)
if isinstance(isdict, dict):
env |= isdict
else:
display.warning("could not parse environment value, skipping: %s" % value)
elif isinstance(value, dict):
# should not really happen
env = dict()
for env_item in value:
_parse_env_kv(env_item, value[env_item])
else:
# at this point it should be a simple string, also should not happen
env = templar.template(value, convert_bare=False)
return env
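    # --- Editor's illustrative sketch (not upstream code) ---
    # The environment keyword accepts several shapes, all folded into one
    # flat dict by the logic above. Hypothetical inputs and results:
    #
    #   environment: [{'PATH': '/bin'}, {'LANG': 'C'}]  ->  {'PATH': '/bin', 'LANG': 'C'}
    #   environment: ['{{ my_env_dict }}']              ->  templated dict merged via |=
    #   environment: {'PATH': '/bin'}                   ->  {'PATH': '/bin'}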
def _post_validate_changed_when(self, attr, value, templar):
"""
changed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
"""
return value
def _post_validate_failed_when(self, attr, value, templar):
"""
failed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
"""
return value
def _post_validate_until(self, attr, value, templar):
"""
until is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
"""
return value
def get_vars(self):
all_vars = dict()
if self._parent:
all_vars |= self._parent.get_vars()
all_vars |= self.vars
if 'tags' in all_vars:
del all_vars['tags']
if 'when' in all_vars:
del all_vars['when']
return all_vars
def get_include_params(self):
all_vars = dict()
if self._parent:
all_vars |= self._parent.get_include_params()
if self.action in C._ACTION_ALL_INCLUDES:
all_vars |= self.vars
return all_vars
def copy(self, exclude_parent=False, exclude_tasks=False):
new_me = super(Task, self).copy()
new_me._parent = None
if self._parent and not exclude_parent:
new_me._parent = self._parent.copy(exclude_tasks=exclude_tasks)
new_me._role = None
if self._role:
new_me._role = self._role
new_me.implicit = self.implicit
new_me.resolved_action = self.resolved_action
new_me._uuid = self._uuid
return new_me
def serialize(self):
data = super(Task, self).serialize()
if not self._squashed and not self._finalized:
if self._parent:
data['parent'] = self._parent.serialize()
data['parent_type'] = self._parent.__class__.__name__
if self._role:
data['role'] = self._role.serialize()
data['implicit'] = self.implicit
data['resolved_action'] = self.resolved_action
return data
def deserialize(self, data):
# import is here to avoid import loops
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler_task_include import HandlerTaskInclude
parent_data = data.get('parent', None)
if parent_data:
parent_type = data.get('parent_type')
if parent_type == 'Block':
p = Block()
elif parent_type == 'TaskInclude':
p = TaskInclude()
elif parent_type == 'HandlerTaskInclude':
p = HandlerTaskInclude()
p.deserialize(parent_data)
self._parent = p
del data['parent']
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
del data['role']
self.implicit = data.get('implicit', False)
self.resolved_action = data.get('resolved_action')
super(Task, self).deserialize(data)
def set_loader(self, loader):
"""
Sets the loader on this object and recursively on parent, child objects.
This is used primarily after the Task has been serialized/deserialized, which
does not preserve the loader.
"""
self._loader = loader
if self._parent:
self._parent.set_loader(loader)
def _get_parent_attribute(self, attr, omit=False):
"""
Generic logic to get the attribute or parent attribute for a task value.
"""
fattr = self.fattributes[attr]
extend = fattr.extend
prepend = fattr.prepend
try:
# omit self, and only get parent values
if omit:
value = Sentinel
else:
value = getattr(self, f'_{attr}', Sentinel)
# If parent is static, we can grab attrs from the parent
# otherwise, defer to the grandparent
if getattr(self._parent, 'statically_loaded', True):
_parent = self._parent
else:
_parent = self._parent._parent
if _parent and (value is Sentinel or extend):
if getattr(_parent, 'statically_loaded', True):
# vars are always inheritable, other attributes might not be for the parent but still should be for other ancestors
if attr != 'vars' and hasattr(_parent, '_get_parent_attribute'):
parent_value = _parent._get_parent_attribute(attr)
else:
parent_value = getattr(_parent, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
except KeyError:
pass
return value
def all_parents_static(self):
if self._parent:
return self._parent.all_parents_static()
return True
def get_first_parent_include(self):
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude):
return self._parent
return self._parent.get_first_parent_include()
return None
def get_play(self):
parent = self._parent
while not isinstance(parent, Block):
parent = parent._parent
return parent._play
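# --- Editor's illustrative sketch (not upstream code) ---
# get_play() walks the _parent chain until it reaches a Block, whose
# _play attribute points at the owning Play. Roughly:
#
#   >>> parent = task._parent                 # hypothetical task object
#   >>> while not isinstance(parent, Block):
#   ...     parent = parent._parent
#   >>> play = parent._play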
| 20,466 | Python | .py | 427 | 37.405152 | 135 | 0.6181 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,087 | handler_task_include.py | ansible_ansible/lib/ansible/playbook/handler_task_include.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
# from ansible.inventory.host import Host
from ansible.playbook.handler import Handler
from ansible.playbook.task_include import TaskInclude
class HandlerTaskInclude(Handler, TaskInclude):
VALID_INCLUDE_KEYWORDS = TaskInclude.VALID_INCLUDE_KEYWORDS.union(('listen',))
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = HandlerTaskInclude(block=block, role=role, task_include=task_include)
handler = t.check_options(
t.load_data(data, variable_manager=variable_manager, loader=loader),
data
)
return handler
| 1,391 | Python | .py | 30 | 42.866667 | 97 | 0.757016 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,088 | playbook_include.py | ansible_ansible/lib/ansible/playbook/playbook_include.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
import ansible.constants as C
from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.six import string_types
from ansible.parsing.splitter import split_args
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
from ansible.template import Templar
from ansible.utils.display import Display
display = Display()
class PlaybookInclude(Base, Conditional, Taggable):
import_playbook = NonInheritableFieldAttribute(isa='string')
vars_val = NonInheritableFieldAttribute(isa='dict', default=dict, alias='vars')
@staticmethod
def load(data, basedir, variable_manager=None, loader=None):
return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)
def load_data(self, ds, variable_manager=None, loader=None, basedir=None):
"""
Overrides the base load_data(), as we're actually going to return a new
Playbook() object rather than a PlaybookInclude object
"""
# import here to avoid a dependency loop
from ansible.playbook import Playbook
from ansible.playbook.play import Play
# first, we use the original parent method to correctly load the object
# via the load_data/preprocess_data system we normally use for other
# playbook objects
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
all_vars = self.vars.copy()
if variable_manager:
all_vars |= variable_manager.get_vars()
templar = Templar(loader=loader, variables=all_vars)
# then we use the object to load a Playbook
pb = Playbook(loader=loader)
file_name = templar.template(new_obj.import_playbook)
# check for FQCN
resource = _get_collection_playbook_path(file_name)
if resource is not None:
playbook = resource[1]
playbook_collection = resource[2]
else:
# not FQCN try path
playbook = file_name
if not os.path.isabs(playbook):
playbook = os.path.join(basedir, playbook)
# might still be collection playbook
playbook_collection = _get_collection_name_from_path(playbook)
if playbook_collection:
# it is a collection playbook, setup default collections
AnsibleCollectionConfig.default_collection = playbook_collection
else:
# it is NOT a collection playbook, setup adjacent paths
AnsibleCollectionConfig.playbook_paths.append(os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict'))))
pb._load_playbook_data(file_name=playbook, variable_manager=variable_manager, vars=self.vars.copy())
# finally, update each loaded playbook entry with any variables specified
# on the included playbook and/or any tags which may have been set
for entry in pb._entries:
# conditional includes on a playbook need a marker to skip gathering
if new_obj.when and isinstance(entry, Play):
entry._included_conditional = new_obj.when[:]
temp_vars = entry.vars | new_obj.vars
param_tags = temp_vars.pop('tags', None)
if param_tags is not None:
entry.tags.extend(param_tags.split(','))
entry.vars = temp_vars
entry.tags = list(set(entry.tags).union(new_obj.tags))
if entry._included_path is None:
entry._included_path = os.path.dirname(playbook)
# Check to see if we need to forward the conditionals on to the included
# plays. If so, we can take a shortcut here and simply prepend them to
# those attached to each block (if any)
if new_obj.when:
for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
task_block._when = new_obj.when[:] + task_block.when[:]
return pb
def preprocess_data(self, ds):
"""
        Reorganizes the data for a PlaybookInclude datastructure so it
        lines up with the attributes we expect it to have.
"""
if not isinstance(ds, dict):
raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
# the new, cleaned datastructure, which will have legacy
# items reduced to a standard structure
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
for (k, v) in ds.items():
if k in C._ACTION_IMPORT_PLAYBOOK:
self._preprocess_import(ds, new_ds, k, v)
else:
# some basic error checking, to make sure vars are properly
# formatted and do not conflict with k=v parameters
if k == 'vars':
if 'vars' in new_ds:
raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
elif not isinstance(v, dict):
raise AnsibleParserError("vars for import_playbook statements must be specified as a dictionary", obj=ds)
new_ds[k] = v
return super(PlaybookInclude, self).preprocess_data(new_ds)
def _preprocess_import(self, ds, new_ds, k, v):
"""
Splits the playbook import line up into filename and parameters
"""
if v is None:
raise AnsibleParserError("playbook import parameter is missing", obj=ds)
elif not isinstance(v, string_types):
raise AnsibleParserError("playbook import parameter must be a string indicating a file path, got %s instead" % type(v), obj=ds)
# The import_playbook line must include at least one item, which is the filename
# to import. Anything after that should be regarded as a parameter to the import
items = split_args(v)
if len(items) == 0:
raise AnsibleParserError("import_playbook statements must specify the file name to import", obj=ds)
new_ds['import_playbook'] = items[0].strip()
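    # --- Editor's illustrative sketch (not upstream code) ---
    # split_args() tokenizes the import line shell-style; the first token
    # is the playbook path and anything after it is a parameter:
    #
    #   >>> from ansible.parsing.splitter import split_args
    #   >>> split_args('site.yml foo=bar')
    #   ['site.yml', 'foo=bar']
    #   >>> # so new_ds['import_playbook'] == 'site.yml'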
| 7,492 | Python | .py | 137 | 45.656934 | 144 | 0.681142 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,089 | loop_control.py | ansible_ansible/lib/ansible/playbook/loop_control.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import FieldAttributeBase
class LoopControl(FieldAttributeBase):
loop_var = NonInheritableFieldAttribute(isa='string', default='item', always_post_validate=True)
index_var = NonInheritableFieldAttribute(isa='string', always_post_validate=True)
label = NonInheritableFieldAttribute(isa='string')
pause = NonInheritableFieldAttribute(isa='float', default=0, always_post_validate=True)
extended = NonInheritableFieldAttribute(isa='bool', always_post_validate=True)
extended_allitems = NonInheritableFieldAttribute(isa='bool', default=True, always_post_validate=True)
break_when = NonInheritableFieldAttribute(isa='list', default=list)
def __init__(self):
super(LoopControl, self).__init__()
@staticmethod
def load(data, variable_manager=None, loader=None):
t = LoopControl()
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def _post_validate_break_when(self, attr, value, templar):
"""
break_when is evaluated after the execution of the loop is complete,
and should not be templated during the regular post_validate step.
"""
return value
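# --- Editor's illustrative sketch (not upstream code) ---
# LoopControl.load() consumes the loop_control mapping of a task. With
# hypothetical data:
#
#   >>> lc = LoopControl.load({'loop_var': 'outer_item', 'pause': 1.5})
#   >>> # lc.loop_var == 'outer_item'; unset fields keep their defaults,
#   >>> # e.g. extended_allitems defaults to True.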
| 2,022 | Python | .py | 39 | 47.871795 | 105 | 0.757975 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,090 | helpers.py | ansible_ansible/lib/ansible/playbook/helpers.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.common.text.converters import to_native
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.utils.display import Display
display = Display()
def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
"""
Given a list of mixed task/block data (parsed from YAML),
return a list of Block() objects, where implicit blocks
are created for each bare Task.
"""
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
if not isinstance(ds, (list, type(None))):
raise AnsibleAssertionError('%s should be a list or None but is %s' % (ds, type(ds)))
block_list = []
if ds:
count = iter(range(len(ds)))
for i in count:
block_ds = ds[i]
# Implicit blocks are created by bare tasks listed in a play without
# an explicit block statement. If we have two implicit blocks in a row,
# squash them down to a single block to save processing time later.
implicit_blocks = []
while block_ds is not None and not Block.is_block(block_ds):
implicit_blocks.append(block_ds)
i += 1
# Advance the iterator, so we don't repeat
next(count, None)
try:
block_ds = ds[i]
except IndexError:
block_ds = None
# Loop both implicit blocks and block_ds as block_ds is the next in the list
for b in (implicit_blocks, block_ds):
if b:
block_list.append(
Block.load(
b,
play=play,
parent_block=parent_block,
role=role,
task_include=task_include,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader,
)
)
return block_list
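# --- Editor's illustrative sketch (not upstream code) ---
# Consecutive bare tasks are squashed into a single implicit Block, so a
# hypothetical play datastructure like
#
#   [task1, task2, {'block': [task3]}, task4]
#
# yields three Block objects: one wrapping [task1, task2], one for the
# explicit block, and one wrapping [task4].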
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
"""
Given a list of task datastructures (parsed from YAML),
return a list of Task() or TaskInclude() objects.
"""
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.playbook.handler_task_include import HandlerTaskInclude
from ansible.template import Templar
if not isinstance(ds, list):
raise AnsibleAssertionError('The ds (%s) should be a list but was a %s' % (ds, type(ds)))
task_list = []
for task_ds in ds:
if not isinstance(task_ds, dict):
raise AnsibleAssertionError('The ds (%s) should be a dict but was a %s' % (ds, type(ds)))
if 'block' in task_ds:
if use_handlers:
raise AnsibleParserError("Using a block as a handler is not supported.", obj=task_ds)
t = Block.load(
task_ds,
play=play,
parent_block=block,
role=role,
task_include=task_include,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader,
)
task_list.append(t)
else:
args_parser = ModuleArgsParser(task_ds)
try:
(action, args, delegate_to) = args_parser.parse(skip_action_validation=True)
except AnsibleParserError as e:
                # if the raised exception was created with obj=ds args, then it includes the detail,
                # so we don't need to add it and can just re-raise.
if e.obj:
raise
# But if it wasn't, we can add the yaml object now to get more detail
raise AnsibleParserError(to_native(e), obj=task_ds, orig_exc=e)
if action in C._ACTION_ALL_INCLUDE_IMPORT_TASKS:
if use_handlers:
include_class = HandlerTaskInclude
else:
include_class = TaskInclude
t = include_class.load(
task_ds,
block=block,
role=role,
task_include=None,
variable_manager=variable_manager,
loader=loader
)
all_vars = variable_manager.get_vars(play=play, task=t)
templar = Templar(loader=loader, variables=all_vars)
# check to see if this include is dynamic or static:
if action in C._ACTION_IMPORT_TASKS:
if t.loop is not None:
raise AnsibleParserError("You cannot use loops on 'import_tasks' statements. You should use 'include_tasks' instead.", obj=task_ds)
# we set a flag to indicate this include was static
t.statically_loaded = True
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = block
cumulative_path = None
found = False
subdir = 'tasks'
if use_handlers:
subdir = 'handlers'
while parent_include is not None:
if not isinstance(parent_include, TaskInclude):
parent_include = parent_include._parent
continue
try:
parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
except AnsibleUndefinedVariable as e:
if not parent_include.statically_loaded:
raise AnsibleParserError(
"Error when evaluating variable in dynamic parent include path: %s. "
"When using static imports, the parent dynamic include cannot utilize host facts "
"or variables from inventory" % parent_include.args.get('_raw_params'),
obj=task_ds,
suppress_extended_error=True,
orig_exc=e
)
raise
if cumulative_path is None:
cumulative_path = parent_include_dir
elif not os.path.isabs(cumulative_path):
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
include_target = templar.template(t.args['_raw_params'])
if t._role:
new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
if os.path.exists(include_file):
found = True
break
else:
parent_include = parent_include._parent
if not found:
try:
include_target = templar.template(t.args['_raw_params'])
except AnsibleUndefinedVariable as e:
raise AnsibleParserError(
"Error when evaluating variable in import path: %s.\n\n"
"When using static imports, ensure that any variables used in their names are defined in vars/vars_files\n"
"or extra-vars passed in from the command line. Static imports cannot use variables from facts or inventory\n"
"sources like group or host vars." % t.args['_raw_params'],
obj=task_ds,
suppress_extended_error=True,
orig_exc=e)
if t._role:
include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
else:
include_file = loader.path_dwim(include_target)
data = loader.load_from_file(include_file)
if not data:
display.warning('file %s is empty and had no tasks to include' % include_file)
continue
elif not isinstance(data, list):
raise AnsibleParserError("included task files must contain a list of tasks", obj=data)
# since we can't send callbacks here, we display a message directly in
# the same fashion used by the on_include callback. We also do it here,
# because the recursive nature of helper methods means we may be loading
# nested includes, and we want the include order printed correctly
display.vv("statically imported: %s" % include_file)
ti_copy = t.copy(exclude_parent=True)
ti_copy._parent = block
included_blocks = load_list_of_blocks(
data,
play=play,
parent_block=None,
task_include=ti_copy,
role=role,
use_handlers=use_handlers,
loader=loader,
variable_manager=variable_manager,
)
tags = ti_copy.tags[:]
# now we extend the tags on each of the included blocks
for b in included_blocks:
b.tags = list(set(b.tags).union(tags))
# END FIXME
# FIXME: handlers shouldn't need this special handling, but do
# right now because they don't iterate blocks correctly
if use_handlers:
for b in included_blocks:
task_list.extend(b.block)
else:
task_list.extend(included_blocks)
else:
task_list.append(t)
elif action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES:
if use_handlers:
raise AnsibleParserError(f"Using '{action}' as a handler is not supported.", obj=task_ds)
ir = IncludeRole.load(
task_ds,
block=block,
role=role,
task_include=None,
variable_manager=variable_manager,
loader=loader,
)
if action in C._ACTION_IMPORT_ROLE:
if ir.loop is not None:
raise AnsibleParserError("You cannot use loops on 'import_role' statements. You should use 'include_role' instead.", obj=task_ds)
# we set a flag to indicate this include was static
ir.statically_loaded = True
# template the role name now, if needed
all_vars = variable_manager.get_vars(play=play, task=ir)
templar = Templar(loader=loader, variables=all_vars)
ir.post_validate(templar=templar)
ir._role_name = templar.template(ir._role_name)
# uses compiled list from object
blocks, dummy = ir.get_block_list(variable_manager=variable_manager, loader=loader)
task_list.extend(blocks)
else:
# passes task object itself for latter generation of list
task_list.append(ir)
else:
if use_handlers:
t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
if t.action in C._ACTION_META and t.args.get('_raw_params') == "end_role":
raise AnsibleParserError("Cannot execute 'end_role' from a handler")
else:
t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
if t.action in C._ACTION_META and t.args.get('_raw_params') == "end_role" and role is None:
raise AnsibleParserError("Cannot execute 'end_role' from outside of a role")
task_list.append(t)
return task_list
def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None, collection_search_list=None):
"""
Loads and returns a list of RoleInclude objects from the ds list of role definitions
:param ds: list of roles to load
:param play: calling Play object
:param current_role_path: path of the owning role, if any
:param variable_manager: varmgr to use for templating
:param loader: loader to use for DS parsing/services
:param collection_search_list: list of collections to search for unqualified role names
:return:
"""
# we import here to prevent a circular dependency with imports
from ansible.playbook.role.include import RoleInclude
if not isinstance(ds, list):
raise AnsibleAssertionError('ds (%s) should be a list but was a %s' % (ds, type(ds)))
roles = []
for role_def in ds:
i = RoleInclude.load(role_def, play=play, current_role_path=current_role_path, variable_manager=variable_manager,
loader=loader, collection_list=collection_search_list)
roles.append(i)
return roles
| 15,561 | Python | .py | 285 | 37.280702 | 155 | 0.550886 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,091 | notifiable.py | ansible_ansible/lib/ansible/playbook/notifiable.py |
# -*- coding: utf-8 -*-
# Copyright The Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible.playbook.attribute import FieldAttribute
class Notifiable:
notify = FieldAttribute(isa='list')
| 299 | Python | .py | 7 | 40.714286 | 92 | 0.764706 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,092 | task_include.py | ansible_ansible/lib/ansible/playbook/task_include.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import ansible.constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.sentinel import Sentinel
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
__all__ = ['TaskInclude']
display = Display()
class TaskInclude(Task):
"""
A task include is derived from a regular task to handle the special
circumstances related to the `- include_*: ...` task.
"""
BASE = frozenset(('file', '_raw_params')) # directly assigned
OTHER_ARGS = frozenset(('apply',)) # assigned to matching property
VALID_ARGS = BASE.union(OTHER_ARGS) # all valid args
VALID_INCLUDE_KEYWORDS = frozenset(('action', 'args', 'collections', 'debugger', 'ignore_errors', 'loop', 'loop_control',
'loop_with', 'name', 'no_log', 'register', 'run_once', 'tags', 'timeout', 'vars',
'when'))
def __init__(self, block=None, role=None, task_include=None):
super(TaskInclude, self).__init__(block=block, role=role, task_include=task_include)
self.statically_loaded = False
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
ti = TaskInclude(block=block, role=role, task_include=task_include)
task = ti.check_options(
ti.load_data(data, variable_manager=variable_manager, loader=loader),
data
)
return task
def check_options(self, task, data):
"""
Method for options validation to use in 'load_data' for TaskInclude and HandlerTaskInclude
since they share the same validations. It is not named 'validate_options' on purpose
        to prevent confusion with '_validate_*' methods. Note that the task passed might be changed
as a side-effect of this method.
"""
my_arg_names = frozenset(task.args.keys())
# validate bad args, otherwise we silently ignore
bad_opts = my_arg_names.difference(self.VALID_ARGS)
if bad_opts and task.action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS:
raise AnsibleParserError('Invalid options for %s: %s' % (task.action, ','.join(list(bad_opts))), obj=data)
if not task.args.get('_raw_params'):
task.args['_raw_params'] = task.args.pop('file', None)
if not task.args['_raw_params']:
raise AnsibleParserError('No file specified for %s' % task.action, obj=data)
apply_attrs = task.args.get('apply', {})
if apply_attrs and task.action not in C._ACTION_INCLUDE_TASKS:
raise AnsibleParserError('Invalid options for %s: apply' % task.action, obj=data)
elif not isinstance(apply_attrs, dict):
raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)
return task
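    # --- Editor's illustrative sketch (not upstream code) ---
    # check_options() normalizes the two ways of naming the included file,
    # so both of these (hypothetical) forms end up in _raw_params:
    #
    #   - include_tasks: other.yml       ->  args == {'_raw_params': 'other.yml'}
    #   - include_tasks:
    #       file: other.yml              ->  args == {'_raw_params': 'other.yml'}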
def preprocess_data(self, ds):
ds = super(TaskInclude, self).preprocess_data(ds)
diff = set(ds.keys()).difference(self.VALID_INCLUDE_KEYWORDS)
for k in diff:
# This check doesn't handle ``include`` as we have no idea at this point if it is static or not
if ds[k] is not Sentinel and ds['action'] in C._ACTION_ALL_INCLUDE_ROLE_TASKS:
if C.INVALID_TASK_ATTRIBUTE_FAILED:
raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (k, self.__class__.__name__), obj=ds)
else:
display.warning("Ignoring invalid attribute: %s" % k)
return ds
def copy(self, exclude_parent=False, exclude_tasks=False):
new_me = super(TaskInclude, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
new_me.statically_loaded = self.statically_loaded
return new_me
def build_parent_block(self):
"""
This method is used to create the parent block for the included tasks
when ``apply`` is specified
"""
apply_attrs = self.args.pop('apply', {})
if apply_attrs:
apply_attrs['block'] = []
p_block = Block.load(
apply_attrs,
play=self._parent._play,
task_include=self,
role=self._role,
variable_manager=self._variable_manager,
loader=self._loader,
)
else:
p_block = self
return p_block
| 5,258 | Python | .py | 103 | 42.378641 | 125 | 0.648743 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,093 | handler.py | ansible_ansible/lib/ansible/playbook/handler.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleAssertionError
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.task import Task
from ansible.module_utils.six import string_types
class Handler(Task):
listen = NonInheritableFieldAttribute(isa='list', default=list, listof=string_types, static=True)
def __init__(self, block=None, role=None, task_include=None):
self.notified_hosts = []
self.cached_name = False
super(Handler, self).__init__(block=block, role=role, task_include=task_include)
def __repr__(self):
""" returns a human-readable representation of the handler """
return "HANDLER: %s" % self.get_name()
def _validate_listen(self, attr, name, value):
new_value = self.get_validated_value(name, attr, value, None)
if self._role is not None:
for listener in new_value.copy():
new_value.extend([
f"{self._role.get_name(include_role_fqcn=True)} : {listener}",
f"{self._role.get_name(include_role_fqcn=False)} : {listener}",
])
setattr(self, name, new_value)
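    # --- Editor's illustrative sketch (not upstream code) ---
    # Inside a role, every listen topic is also registered under the
    # role-qualified names, so notifying any of the forms works. Roughly,
    # for a handler in a hypothetical role 'ns.col.web' listening on
    # 'restart', the validated value becomes:
    #
    #   ['restart', 'ns.col.web : restart', 'web : restart']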
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = Handler(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def notify_host(self, host):
if not self.is_host_notified(host):
self.notified_hosts.append(host)
return True
return False
def remove_host(self, host):
try:
self.notified_hosts.remove(host)
except ValueError:
raise AnsibleAssertionError(
f"Attempting to remove a notification on handler '{self}' for host '{host}' but it has not been notified."
)
def clear_hosts(self):
self.notified_hosts = []
def is_host_notified(self, host):
return host in self.notified_hosts
def serialize(self):
result = super(Handler, self).serialize()
result['is_handler'] = True
return result
| 2,925 | Python | .py | 63 | 39.52381 | 122 | 0.681658 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

14,094 | definition.py | ansible_ansible/lib/ansible/playbook/role/definition.py |
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _get_collection_role_path
from ansible.utils.path import unfrackpath
from ansible.utils.display import Display
__all__ = ['RoleDefinition']
display = Display()
class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
role = NonInheritableFieldAttribute(isa='string')
def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
super(RoleDefinition, self).__init__()
self._play = play
self._variable_manager = variable_manager
self._loader = loader
self._role_path = None
self._role_collection = None
self._role_basedir = role_basedir
self._role_params = dict()
self._collection_list = collection_list
# def __repr__(self):
# return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')
@staticmethod
def load(data, variable_manager=None, loader=None):
raise AnsibleError("not implemented")
def preprocess_data(self, ds):
# role names that are simply numbers can be parsed by PyYAML
# as integers even when quoted, so turn it into a string type
if isinstance(ds, int):
ds = "%s" % ds
if not isinstance(ds, dict) and not isinstance(ds, string_types) and not isinstance(ds, AnsibleBaseYAMLObject):
raise AnsibleAssertionError()
if isinstance(ds, dict):
ds = super(RoleDefinition, self).preprocess_data(ds)
# save the original ds for use later
self._ds = ds
# we create a new data structure here, using the same
# object used internally by the YAML parsing code so we
# can preserve file:line:column information if it exists
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
# first we pull the role name out of the data structure,
# and then use that to determine the role path (which may
# result in a new role name, if it was a file path)
role_name = self._load_role_name(ds)
(role_name, role_path) = self._load_role_path(role_name)
# next, we split the role params out from the valid role
# attributes and update the new datastructure with that
# result and the role name
if isinstance(ds, dict):
(new_role_def, role_params) = self._split_role_params(ds)
new_ds |= new_role_def
self._role_params = role_params
# set the role name in the new ds
new_ds['role'] = role_name
# we store the role path internally
self._role_path = role_path
# and return the cleaned-up data structure
return new_ds
def _load_role_name(self, ds):
"""
Returns the role name (either the role: or name: field) from
the role definition, or (when the role definition is a simple
string), just that string
"""
if isinstance(ds, string_types):
return ds
role_name = ds.get('role', ds.get('name'))
if not role_name or not isinstance(role_name, string_types):
raise AnsibleError('role definitions must contain a role name', obj=ds)
# if we have the required datastructures, and if the role_name
# contains a variable, try and template it now
if self._variable_manager:
all_vars = self._variable_manager.get_vars(play=self._play)
templar = Templar(loader=self._loader, variables=all_vars)
role_name = templar.template(role_name)
return role_name
def _load_role_path(self, role_name):
"""
the 'role', as specified in the ds (or as a bare string), can either
be a simple name or a full path. If it is a full path, we use the
basename as the role name, otherwise we take the name as-given and
append it to the default role path
"""
# create a templar class to template the dependency names, in
# case they contain variables
if self._variable_manager is not None:
all_vars = self._variable_manager.get_vars(play=self._play)
else:
all_vars = dict()
templar = Templar(loader=self._loader, variables=all_vars)
role_name = templar.template(role_name)
role_tuple = None
# try to load as a collection-based role first
if self._collection_list or AnsibleCollectionRef.is_valid_fqcr(role_name):
role_tuple = _get_collection_role_path(role_name, self._collection_list)
if role_tuple:
# we found it, stash collection data and return the name/path tuple
self._role_collection = role_tuple[2]
return role_tuple[0:2]
# We didn't find a collection role, look in defined role paths
# FUTURE: refactor this to be callable from internal so we can properly order
# ansible.legacy searches with the collections keyword
# we always start the search for roles in the base directory of the playbook
role_search_paths = [
os.path.join(self._loader.get_basedir(), u'roles'),
]
# also search in the configured roles path
if C.DEFAULT_ROLES_PATH:
role_search_paths.extend(C.DEFAULT_ROLES_PATH)
# next, append the roles basedir, if it was set, so we can
# search relative to that directory for dependent roles
if self._role_basedir:
role_search_paths.append(self._role_basedir)
# finally as a last resort we look in the current basedir as set
# in the loader (which should be the playbook dir itself) but without
# the roles/ dir appended
role_search_paths.append(self._loader.get_basedir())
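        # Illustrative summary (not in the original source) of the search order
        # assembled above, assuming a playbook located at /play:
        #   1. /play/roles/<name>
        #   2. each entry of the configured DEFAULT_ROLES_PATH
        #   3. <role_basedir>/<name> (only set for dependent roles)
        #   4. /play/<name>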
# now iterate through the possible paths and return the first one we find
for path in role_search_paths:
path = templar.template(path)
role_path = unfrackpath(os.path.join(path, role_name))
if self._loader.path_exists(role_path):
return (role_name, role_path)
# if not found elsewhere try to extract path from name
role_path = unfrackpath(role_name)
if self._loader.path_exists(role_path):
role_name = os.path.basename(role_name)
return (role_name, role_path)
searches = (self._collection_list or []) + role_search_paths
raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(searches)), obj=self._ds)
def _split_role_params(self, ds):
"""
Splits any random role params off from the role spec and store
them in a dictionary of params for parsing later
"""
role_def = dict()
role_params = dict()
base_attribute_names = frozenset(self.fattributes)
for (key, value) in ds.items():
# use the list of FieldAttribute values to determine what is and is not
# an extra parameter for this role (or sub-class of this role)
# FIXME: hard-coded list of exception key names here corresponds to the
# connection fields in the Base class. There may need to be some
# other mechanism where we exclude certain kinds of field attributes,
# or make this list more automatic in some way so we don't have to
# remember to update it manually.
if key not in base_attribute_names:
# this key does not match a field attribute, so it must be a role param
role_params[key] = value
else:
# this is a field attribute, so copy it over directly
role_def[key] = value
return (role_def, role_params)
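    # Illustrative example (not in the original source): given the role spec
    #   {'role': 'common', 'tags': ['web'], 'http_port': 80}
    # 'role' and 'tags' match field attributes and land in role_def, while
    # 'http_port' does not, so it becomes a role param:
    #   role_def == {'role': 'common', 'tags': ['web']}
    #   role_params == {'http_port': 80}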
def get_role_params(self):
return self._role_params.copy()
def get_role_path(self):
return self._role_path
def get_name(self, include_role_fqcn=True):
if include_role_fqcn:
return '.'.join(x for x in (self._role_collection, self.role) if x)
return self.role
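    # Illustrative example: with _role_collection == 'community.general' and
    # role == 'haproxy', get_name() returns 'community.general.haproxy', while
    # get_name(include_role_fqcn=False) returns just 'haproxy'.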
| 9,550 | Python | .py | 189 | 42.05291 | 119 | 0.664734 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,095 | include.py | ansible_ansible/lib/ansible/playbook/role/include.py |
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.playbook.delegatable import Delegatable
from ansible.playbook.role.definition import RoleDefinition
from ansible.module_utils.common.text.converters import to_native
__all__ = ['RoleInclude']
class RoleInclude(RoleDefinition, Delegatable):
"""
A derivative of RoleDefinition, used by playbook code when a role
is included for execution in a play.
"""
def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager,
loader=loader, collection_list=collection_list)
@staticmethod
def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None, collection_list=None):
if not (isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)):
raise AnsibleParserError("Invalid role definition: %s" % to_native(data))
if isinstance(data, string_types) and ',' in data:
raise AnsibleError("Invalid old style role requirement: %s" % data)
ri = RoleInclude(play=play, role_basedir=current_role_path, variable_manager=variable_manager, loader=loader, collection_list=collection_list)
return ri.load_data(data, variable_manager=variable_manager, loader=loader)
| 2,342 | Python | .py | 40 | 53.9 | 150 | 0.754148 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,096 | requirement.py | ansible_ansible/lib/ansible/playbook/role/requirement.py |
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.playbook.role.definition import RoleDefinition
from ansible.utils.display import Display
from ansible.utils.galaxy import scm_archive_resource
__all__ = ['RoleRequirement']
VALID_SPEC_KEYS = [
'name',
'role',
'scm',
'src',
'version',
]
display = Display()
class RoleRequirement(RoleDefinition):
"""
Helper class for Galaxy, which is used to parse both dependencies
specified in meta/main.yml and requirements.yml files.
"""
def __init__(self):
pass
@staticmethod
def repo_url_to_role_name(repo_url):
        # gets the role name out of a repo url, e.g.
        # http://git.example.com/repos/repo.git => 'repo'
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
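    # Illustrative examples (derived from the logic above, not original comments):
    #   repo_url_to_role_name('http://git.example.com/repos/repo.git')  -> 'repo'
    #   repo_url_to_role_name('https://example.com/dl/myrole.tar.gz')   -> 'myrole'
    #   repo_url_to_role_name('geerlingguy.apache')                     -> 'geerlingguy.apache'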
@staticmethod
def role_yaml_parse(role):
if isinstance(role, string_types):
name = None
scm = None
src = None
version = None
if ',' in role:
if role.count(',') == 1:
(src, version) = role.strip().split(',', 1)
elif role.count(',') == 2:
(src, version, name) = role.strip().split(',', 2)
else:
raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
else:
src = role
if name is None:
name = RoleRequirement.repo_url_to_role_name(src)
if '+' in src:
(scm, src) = src.split('+', 1)
return dict(name=name, src=src, scm=scm, version=version)
if 'role' in role:
name = role['role']
if ',' in name:
raise AnsibleError("Invalid old style role requirement: %s" % name)
else:
del role['role']
role['name'] = name
else:
role = role.copy()
if 'src' in role:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
role["scm"], dummy, role["src"] = role["src"].partition('+')
if 'name' not in role:
role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
for key in list(role.keys()):
if key not in VALID_SPEC_KEYS:
role.pop(key)
return role
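    # Illustrative examples (not in the original source) of the comma-separated
    # string form handled above; the URLs are hypothetical:
    #   role_yaml_parse('geerlingguy.apache,2.0.0')
    #     -> {'name': 'geerlingguy.apache', 'src': 'geerlingguy.apache',
    #         'scm': None, 'version': '2.0.0'}
    #   role_yaml_parse('git+https://example.com/roles/webrole.git,v1.2')
    #     -> {'name': 'webrole', 'src': 'https://example.com/roles/webrole.git',
    #         'scm': 'git', 'version': 'v1.2'}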
@staticmethod
def scm_archive_role(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
return scm_archive_resource(src, scm=scm, name=name, version=version, keep_scm_meta=keep_scm_meta)
| 4,173 | Python | .py | 101 | 32.029703 | 140 | 0.581418 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,097 | metadata.py | ansible_ansible/lib/ansible/playbook/role/metadata.py |
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from ansible.errors import AnsibleParserError, AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.helpers import load_list_of_roles
from ansible.playbook.role.requirement import RoleRequirement
__all__ = ['RoleMetadata']
class RoleMetadata(Base, CollectionSearch):
"""
This class wraps the parsing and validation of the optional metadata
within each Role (meta/main.yml).
"""
allow_duplicates = NonInheritableFieldAttribute(isa='bool', default=False)
dependencies = NonInheritableFieldAttribute(isa='list', default=list)
galaxy_info = NonInheritableFieldAttribute(isa='dict')
argument_specs = NonInheritableFieldAttribute(isa='dict', default=dict)
def __init__(self, owner=None):
self._owner = owner
super(RoleMetadata, self).__init__()
@staticmethod
def load(data, owner, variable_manager=None, loader=None):
"""
Returns a new RoleMetadata object based on the datastructure passed in.
"""
if not isinstance(data, dict):
raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
return m
def _load_dependencies(self, attr, ds):
"""
This is a helper loading function for the dependencies list,
which returns a list of RoleInclude objects
"""
roles = []
if ds:
if not isinstance(ds, list):
raise AnsibleParserError("Expected role dependencies to be a list.", obj=self._ds)
for role_def in ds:
# FIXME: consolidate with ansible-galaxy to keep this in sync
if isinstance(role_def, string_types) or 'role' in role_def or 'name' in role_def:
roles.append(role_def)
continue
try:
# role_def is new style: { src: 'galaxy.role,version,name', other_vars: "here" }
def_parsed = RoleRequirement.role_yaml_parse(role_def)
if def_parsed.get('name'):
role_def['name'] = def_parsed['name']
roles.append(role_def)
except AnsibleError as exc:
raise AnsibleParserError(to_native(exc), obj=role_def, orig_exc=exc)
current_role_path = None
collection_search_list = None
if self._owner:
current_role_path = os.path.dirname(self._owner._role_path)
# if the calling role has a collections search path defined, consult it
collection_search_list = self._owner.collections[:] or []
# if the calling role is a collection role, ensure that its containing collection is searched first
owner_collection = self._owner._role_collection
if owner_collection:
collection_search_list = [c for c in collection_search_list if c != owner_collection]
collection_search_list.insert(0, owner_collection)
# ensure fallback role search works
if 'ansible.legacy' not in collection_search_list:
collection_search_list.append('ansible.legacy')
try:
return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path,
variable_manager=self._variable_manager, loader=self._loader,
collection_search_list=collection_search_list)
except AssertionError as e:
raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds, orig_exc=e)
def serialize(self):
return dict(
allow_duplicates=self._allow_duplicates,
dependencies=self._dependencies
)
def deserialize(self, data):
setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
setattr(self, 'dependencies', data.get('dependencies', []))
| 5,074 | Python | .py | 98 | 42.520408 | 120 | 0.668819 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,098 | __init__.py | ansible_ansible/lib/ansible/playbook/role/__init__.py |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import os
from collections.abc import Container, Mapping, Set, Sequence
from types import MappingProxyType
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import binary_type, text_type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
from ansible.playbook.delegatable import Delegatable
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.path import is_subpath
from ansible.utils.vars import combine_vars
__all__ = ['Role', 'hash_params']
# TODO: this should be a utility function, but can't be a member of
# the role due to the fact that it would require the use of self
# in a static method. This is also used in the base class for
# strategies (ansible/plugins/strategy/__init__.py)
def hash_params(params):
"""
Construct a data structure of parameters that is hashable.
This requires changing any mutable data structures into immutable ones.
We chose a frozenset because role parameters have to be unique.
.. warning:: this does not handle unhashable scalars. Two things
mitigate that limitation:
1) There shouldn't be any unhashable scalars specified in the yaml
2) Our only choice would be to return an error anyway.
"""
# Any container is unhashable if it contains unhashable items (for
# instance, tuple() is a Hashable subclass but if it contains a dict, it
# cannot be hashed)
if isinstance(params, Container) and not isinstance(params, (text_type, binary_type)):
if isinstance(params, Mapping):
try:
# Optimistically hope the contents are all hashable
new_params = frozenset(params.items())
except TypeError:
new_params = set()
for k, v in params.items():
# Hash each entry individually
new_params.add((k, hash_params(v)))
new_params = frozenset(new_params)
elif isinstance(params, (Set, Sequence)):
try:
# Optimistically hope the contents are all hashable
new_params = frozenset(params)
except TypeError:
new_params = set()
for v in params:
# Hash each entry individually
new_params.add(hash_params(v))
new_params = frozenset(new_params)
else:
# This is just a guess.
new_params = frozenset(params)
return new_params
# Note: We do not handle unhashable scalars but our only choice would be
# to raise an error there anyway.
return frozenset((params,))
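# Illustrative examples (not part of the original source):
#   hash_params({'port': 80})          -> frozenset({('port', 80)})
#   hash_params({'ports': [80, 443]})  -> frozenset({('ports', frozenset({80, 443}))})
#   hash_params('just_a_string')       -> frozenset({'just_a_string'})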
class Role(Base, Conditional, Taggable, CollectionSearch, Delegatable):
def __init__(self, play=None, from_files=None, from_include=False, validate=True, public=None, static=True):
self._role_name = None
self._role_path = None
self._role_collection = None
self._role_params = dict()
self._loader = None
self.static = static
# includes (static=false) default to private, while imports (static=true) default to public
# but both can be overridden by global config if set
if public is None:
global_private, origin = C.config.get_config_value_and_origin('DEFAULT_PRIVATE_ROLE_VARS')
if origin == 'default':
self.public = static
else:
self.public = not global_private
else:
self.public = public
self._metadata = RoleMetadata()
self._play = play
self._parents = []
self._dependencies = []
self._all_dependencies = None
self._task_blocks = []
self._handler_blocks = []
self._compiled_handler_blocks = None
self._default_vars = dict()
self._role_vars = dict()
self._had_task_run = dict()
self._completed = dict()
self._should_validate = validate
if from_files is None:
from_files = {}
self._from_files = from_files
# Indicates whether this role was included via include/import_role
self.from_include = from_include
self._hash = None
super(Role, self).__init__()
def __repr__(self):
return self.get_name()
def get_name(self, include_role_fqcn=True):
if include_role_fqcn:
return '.'.join(x for x in (self._role_collection, self._role_name) if x)
return self._role_name
def get_role_path(self):
# Purposefully using realpath for canonical path
return os.path.realpath(self._role_path)
def _get_hash_dict(self):
if self._hash:
return self._hash
self._hash = MappingProxyType(
{
'name': self.get_name(),
'path': self.get_role_path(),
'params': MappingProxyType(self.get_role_params()),
'when': self.when,
'tags': self.tags,
'from_files': MappingProxyType(self._from_files),
'vars': MappingProxyType(self.vars),
'from_include': self.from_include,
}
)
return self._hash
def __eq__(self, other):
if not isinstance(other, Role):
return False
return self._get_hash_dict() == other._get_hash_dict()
@staticmethod
def load(role_include, play, parent_role=None, from_files=None, from_include=False, validate=True, public=None, static=True):
if from_files is None:
from_files = {}
try:
# TODO: need to fix cycle detection in role load (maybe use an empty dict
# for the in-flight in role cache as a sentinel that we're already trying to load
# that role?)
# see https://github.com/ansible/ansible/issues/61527
r = Role(play=play, from_files=from_files, from_include=from_include, validate=validate, public=public, static=static)
r._load_role_data(role_include, parent_role=parent_role)
role_path = r.get_role_path()
if role_path not in play.role_cache:
play.role_cache[role_path] = []
# Using the role path as a cache key is done to improve performance when a large number of roles
# are in use in the play
if r not in play.role_cache[role_path]:
play.role_cache[role_path].append(r)
return r
except RuntimeError:
raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
obj=role_include._ds)
def _load_role_data(self, role_include, parent_role=None):
self._role_name = role_include.role
self._role_path = role_include.get_role_path()
self._role_collection = role_include._role_collection
self._role_params = role_include.get_role_params()
self._variable_manager = role_include.get_variable_manager()
self._loader = role_include.get_loader()
if parent_role:
self.add_parent(parent_role)
# copy over all field attributes from the RoleInclude
# update self._attr directly, to avoid squashing
for attr_name in self.fattributes:
setattr(self, f'_{attr_name}', getattr(role_include, f'_{attr_name}', Sentinel))
# vars and default vars are regular dictionaries
self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars'), allow_dir=True)
if self._role_vars is None:
self._role_vars = {}
elif not isinstance(self._role_vars, Mapping):
raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'), allow_dir=True)
if self._default_vars is None:
self._default_vars = {}
elif not isinstance(self._default_vars, Mapping):
raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
# load the role's other files, if they exist
metadata = self._load_role_yaml('meta')
if metadata:
self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
self._dependencies = self._load_dependencies()
# reset collections list; roles do not inherit collections from parents, just use the defaults
# FUTURE: use a private config default for this so we can allow it to be overridden later
self.collections = []
# configure plugin/collection loading; either prepend the current role's collection or configure legacy plugin loading
# FIXME: need exception for explicit ansible.legacy?
if self._role_collection: # this is a collection-hosted role
self.collections.insert(0, self._role_collection)
else: # this is a legacy role, but set the default collection if there is one
default_collection = AnsibleCollectionConfig.default_collection
if default_collection:
self.collections.insert(0, default_collection)
# legacy role, ensure all plugin dirs under the role are added to plugin search path
add_all_plugin_dirs(self._role_path)
# collections can be specified in metadata for legacy or collection-hosted roles
if self._metadata.collections:
self.collections.extend((c for c in self._metadata.collections if c not in self.collections))
# if any collections were specified, ensure that core or legacy synthetic collections are always included
if self.collections:
# default append collection is core for collection-hosted roles, legacy for others
default_append_collection = 'ansible.builtin' if self._role_collection else 'ansible.legacy'
if 'ansible.builtin' not in self.collections and 'ansible.legacy' not in self.collections:
self.collections.append(default_append_collection)
task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
if self._should_validate:
role_argspecs = self._get_role_argspecs()
task_data = self._prepend_validation_task(task_data, role_argspecs)
if task_data:
try:
self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
except AssertionError as e:
raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
obj=task_data, orig_exc=e)
handler_data = self._load_role_yaml('handlers', main=self._from_files.get('handlers'))
if handler_data:
try:
self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
variable_manager=self._variable_manager)
except AssertionError as e:
raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
obj=handler_data, orig_exc=e)
def _get_role_argspecs(self):
"""Get the role argument spec data.
Role arg specs can be in one of two files in the role meta subdir: argument_specs.yml
or main.yml. The former has precedence over the latter. Data is not combined
between the files.
:returns: A dict of all data under the top-level ``argument_specs`` YAML key
in the argument spec file. An empty dict is returned if there is no
argspec data.
"""
base_argspec_path = os.path.join(self._role_path, 'meta', 'argument_specs')
for ext in C.YAML_FILENAME_EXTENSIONS:
full_path = base_argspec_path + ext
if self._loader.path_exists(full_path):
# Note: _load_role_yaml() takes care of rebuilding the path.
argument_specs = self._load_role_yaml('meta', main='argument_specs')
try:
return argument_specs.get('argument_specs') or {}
except AttributeError:
return {}
# We did not find the meta/argument_specs.[yml|yaml] file, so use the spec
# dict from the role meta data, if it exists. Ansible 2.11 and later will
# have the 'argument_specs' attribute, but earlier versions will not.
return getattr(self._metadata, 'argument_specs', {})
def _prepend_validation_task(self, task_data, argspecs):
"""Insert a role validation task if we have a role argument spec.
This method will prepend a validation task to the front of the role task
list to perform argument spec validation before any other tasks, if an arg spec
exists for the entry point. Entry point defaults to `main`.
:param task_data: List of tasks loaded from the role.
:param argspecs: The role argument spec data dict.
:returns: The (possibly modified) task list.
"""
if argspecs:
# Determine the role entry point so we can retrieve the correct argument spec.
# This comes from the `tasks_from` value to include_role or import_role.
entrypoint = self._from_files.get('tasks', 'main')
entrypoint_arg_spec = argspecs.get(entrypoint)
if entrypoint_arg_spec:
validation_task = self._create_validation_task(entrypoint_arg_spec, entrypoint)
                # Prepend our validate_argument_spec action to happen before any tasks provided by the role.
                # 'any tasks' can and does include 0 or None tasks, in which case we create a new task list
                # and add our validate_argument_spec task to it
if not task_data:
task_data = []
task_data.insert(0, validation_task)
return task_data
def _create_validation_task(self, argument_spec, entrypoint_name):
"""Create a new task data structure that uses the validate_argument_spec action plugin.
:param argument_spec: The arg spec definition for a particular role entry point.
This will be the entire arg spec for the entry point as read from the input file.
:param entrypoint_name: The name of the role entry point associated with the
supplied `argument_spec`.
"""
# If the arg spec provides a short description, use it to flesh out the validation task name
task_name = "Validating arguments against arg spec '%s'" % entrypoint_name
if 'short_description' in argument_spec:
task_name = task_name + ' - ' + argument_spec['short_description']
return {
'action': {
'module': 'ansible.builtin.validate_argument_spec',
# Pass only the 'options' portion of the arg spec to the module.
'argument_spec': argument_spec.get('options', {}),
'provided_arguments': self._role_params,
'validate_args_context': {
'type': 'role',
'name': self._role_name,
'argument_spec_name': entrypoint_name,
'path': self._role_path
},
},
'name': task_name,
'tags': ['always'],
}
def _load_role_yaml(self, subdir, main=None, allow_dir=False):
"""
Find and load role YAML files and return data found.
:param subdir: subdir of role to search (vars, files, tasks, handlers, defaults)
:type subdir: string
:param main: filename to match, will default to 'main.<ext>' if not provided.
:type main: string
        :param allow_dir: If true, we combine the results of all matching files found.
            If false, highlander rules: only the first file found is used. Only for vars (dicts), not tasks (lists).
:type allow_dir: bool
:returns: data from the matched file(s), type can be dict or list depending on vars or tasks.
"""
data = None
file_path = os.path.join(self._role_path, subdir)
if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
# Valid extensions and ordering for roles is hard-coded to maintain portability
extensions = ['.yml', '.yaml', '.json'] # same as default for YAML_FILENAME_EXTENSIONS
            # try the bare (extensionless) name first when a specific file was requested,
            # and last when defaulting to 'main'
# keep 'main' as original to figure out errors if no files found
if main is None:
_main = 'main'
extensions.append('')
else:
_main = main
extensions.insert(0, '')
# not really 'find_vars_files' but find_files_with_extensions_default_to_yaml_filename_extensions
found_files = self._loader.find_vars_files(file_path, _main, extensions, allow_dir)
if found_files:
for found in found_files:
if not is_subpath(found, file_path):
raise AnsibleParserError("Failed loading '%s' for role (%s) as it is not inside the expected role path: '%s'" %
(to_text(found), self._role_name, to_text(file_path)))
new_data = self._loader.load_from_file(found)
if new_data:
if data is not None and isinstance(new_data, Mapping):
data = combine_vars(data, new_data)
else:
data = new_data
# found data so no need to continue unless we want to merge
if not allow_dir:
break
elif main is not None:
            # this won't trigger with the default 'main', only when <subdir>_from is specified
raise AnsibleParserError("Could not find specified file in role: %s/%s" % (subdir, main))
return data
def _load_dependencies(self):
"""
Recursively loads role dependencies from the metadata list of
dependencies, if it exists
"""
deps = []
for role_include in self._metadata.dependencies:
r = Role.load(role_include, play=self._play, parent_role=self, static=self.static)
deps.append(r)
return deps
# other functions
def add_parent(self, parent_role):
""" adds a role to the list of this roles parents """
if not isinstance(parent_role, Role):
raise AnsibleAssertionError()
if parent_role not in self._parents:
self._parents.append(parent_role)
def get_parents(self):
return self._parents
def get_dep_chain(self):
dep_chain = []
for parent in self._parents:
dep_chain.extend(parent.get_dep_chain())
dep_chain.append(parent)
return dep_chain
def get_default_vars(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
default_vars = dict()
for dep in self.get_all_dependencies():
default_vars = combine_vars(default_vars, dep.get_default_vars())
if dep_chain:
for parent in dep_chain:
default_vars = combine_vars(default_vars, parent._default_vars)
default_vars = combine_vars(default_vars, self._default_vars)
return default_vars
def get_inherited_vars(self, dep_chain=None, only_exports=False):
dep_chain = [] if dep_chain is None else dep_chain
inherited_vars = dict()
if dep_chain:
for parent in dep_chain:
if not only_exports:
inherited_vars = combine_vars(inherited_vars, parent.vars)
inherited_vars = combine_vars(inherited_vars, parent._role_vars)
return inherited_vars
def get_role_params(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
params = {}
if dep_chain:
for parent in dep_chain:
params = combine_vars(params, parent._role_params)
params = combine_vars(params, self._role_params)
return params
def get_vars(self, dep_chain=None, include_params=True, only_exports=False):
dep_chain = [] if dep_chain is None else dep_chain
all_vars = {}
# get role_vars: from parent objects
# TODO: is this right precedence for inherited role_vars?
all_vars = self.get_inherited_vars(dep_chain, only_exports=only_exports)
# get exported variables from meta/dependencies
seen = []
for dep in self.get_all_dependencies():
            # Avoid reprocessing duplicate deps, since they can carry vars from previous
            # invocations and those accumulate in deps
# TODO: re-examine dep loading to see if we are somehow improperly adding the same dep too many times
if dep not in seen:
# only take 'exportable' vars from deps
all_vars = combine_vars(all_vars, dep.get_vars(include_params=False, only_exports=True))
seen.append(dep)
# role_vars come from vars/ in a role
all_vars = combine_vars(all_vars, self._role_vars)
if not only_exports:
# include_params are 'inline variables' in role invocation. - {role: x, varname: value}
if include_params:
# TODO: add deprecation notice
all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain))
# these come from vars: keyword in role invocation. - {role: x, vars: {varname: value}}
all_vars = combine_vars(all_vars, self.vars)
return all_vars
def get_direct_dependencies(self):
return self._dependencies[:]
def get_all_dependencies(self):
"""
Returns a list of all deps, built recursively from all child dependencies,
in the proper order in which they should be executed or evaluated.
"""
if self._all_dependencies is None:
self._all_dependencies = []
for dep in self.get_direct_dependencies():
for child_dep in dep.get_all_dependencies():
self._all_dependencies.append(child_dep)
self._all_dependencies.append(dep)
return self._all_dependencies
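    # Illustrative example: if role A directly depends on B, and B depends on C,
    # A.get_all_dependencies() returns [C, B]; the deepest dependencies come
    # first, matching the order in which they should run.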
def get_task_blocks(self):
return self._task_blocks[:]
def get_handler_blocks(self, play, dep_chain=None):
# Do not recreate this list each time ``get_handler_blocks`` is called.
# Cache the results so that we don't potentially overwrite with copied duplicates
#
# ``get_handler_blocks`` may be called when handling ``import_role`` during parsing
# as well as with ``Play.compile_roles_handlers`` from ``TaskExecutor``
if self._compiled_handler_blocks:
return self._compiled_handler_blocks
self._compiled_handler_blocks = block_list = []
# update the dependency chain here
if dep_chain is None:
dep_chain = []
new_dep_chain = dep_chain + [self]
for dep in self.get_direct_dependencies():
dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
for task_block in self._handler_blocks:
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
block_list.append(new_task_block)
return block_list
def has_run(self, host):
"""
Returns true if this role has been iterated over completely and
at least one task was run
"""
return host.name in self._completed
def compile(self, play, dep_chain=None):
"""
Returns the task list for this role, which is created by first
recursively compiling the tasks for all direct dependencies, and
then adding on the tasks for this role.
The role compile() also remembers and saves the dependency chain
with each task, so tasks know by which route they were found, and
can correctly take their parent's tags/conditionals into account.
"""
from ansible.playbook.block import Block
from ansible.playbook.task import Task
block_list = []
# update the dependency chain here
if dep_chain is None:
dep_chain = []
new_dep_chain = dep_chain + [self]
deps = self.get_direct_dependencies()
for dep in deps:
dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
for task_block in self._task_blocks:
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
block_list.append(new_task_block)
eor_block = Block(play=play)
eor_block._loader = self._loader
eor_block._role = self
eor_block._variable_manager = self._variable_manager
eor_block.run_once = False
eor_task = Task(block=eor_block)
eor_task._role = self
eor_task.action = 'meta'
eor_task.args = {'_raw_params': 'role_complete'}
eor_task.implicit = True
eor_task.tags = ['always']
eor_task.when = True
eor_block.block = [eor_task]
block_list.append(eor_block)
return block_list
def serialize(self, include_deps=True):
res = super(Role, self).serialize()
res['_role_name'] = self._role_name
res['_role_path'] = self._role_path
res['_role_vars'] = self._role_vars
res['_role_params'] = self._role_params
res['_default_vars'] = self._default_vars
res['_had_task_run'] = self._had_task_run.copy()
res['_completed'] = self._completed.copy()
res['_metadata'] = self._metadata.serialize()
if include_deps:
deps = []
for role in self.get_direct_dependencies():
deps.append(role.serialize())
res['_dependencies'] = deps
parents = []
for parent in self._parents:
parents.append(parent.serialize(include_deps=False))
res['_parents'] = parents
return res
def deserialize(self, data, include_deps=True):
self._role_name = data.get('_role_name', '')
self._role_path = data.get('_role_path', '')
self._role_vars = data.get('_role_vars', dict())
self._role_params = data.get('_role_params', dict())
self._default_vars = data.get('_default_vars', dict())
self._had_task_run = data.get('_had_task_run', dict())
self._completed = data.get('_completed', dict())
if include_deps:
deps = []
for dep in data.get('_dependencies', []):
r = Role()
r.deserialize(dep)
deps.append(r)
setattr(self, '_dependencies', deps)
parent_data = data.get('_parents', [])
parents = []
for parent in parent_data:
r = Role()
r.deserialize(parent, include_deps=False)
parents.append(r)
setattr(self, '_parents', parents)
metadata_data = data.get('_metadata')
if metadata_data:
m = RoleMetadata()
m.deserialize(metadata_data)
self._metadata = m
super(Role, self).deserialize(data)
def set_loader(self, loader):
self._loader = loader
for parent in self._parents:
parent.set_loader(loader)
for dep in self.get_direct_dependencies():
dep.set_loader(loader)
| 29,713 | Python | .py | 574 | 40.722997 | 156 | 0.62282 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam)
14,099 | manager.py | ansible_ansible/lib/ansible/config/manager.py |
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import atexit
import decimal
import configparser
import os
import os.path
import sys
import stat
import tempfile
from collections import namedtuple
from collections.abc import Mapping, Sequence
from jinja2.nativetypes import NativeEnvironment
from ansible.errors import AnsibleOptionsError, AnsibleError, AnsibleRequiredOptionError
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
from ansible.module_utils.common.yaml import yaml_load
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.quoting import unquote
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.path import cleanup_tmp_file, makedirs_safe, unfrackpath
Setting = namedtuple('Setting', 'name value origin type')
INTERNAL_DEFS = {'lookup': ('_terms',)}
GALAXY_SERVER_DEF = [
('url', True, 'str'),
('username', False, 'str'),
('password', False, 'str'),
('token', False, 'str'),
('auth_url', False, 'str'),
('api_version', False, 'int'),
('validate_certs', False, 'bool'),
('client_id', False, 'str'),
('timeout', False, 'int'),
]
# config definition fields
GALAXY_SERVER_ADDITIONAL = {
'api_version': {'default': None, 'choices': [2, 3]},
'validate_certs': {'cli': [{'name': 'validate_certs'}]},
'timeout': {'cli': [{'name': 'timeout'}]},
'token': {'default': None},
}
def _get_entry(plugin_type, plugin_name, config):
""" construct entry for requested config """
entry = ''
if plugin_type:
entry += 'plugin_type: %s ' % plugin_type
if plugin_name:
entry += 'plugin: %s ' % plugin_name
entry += 'setting: %s ' % config
return entry
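# Illustrative example (not in the original source):
#   _get_entry('connection', 'ssh', 'timeout')
#     -> 'plugin_type: connection plugin: ssh setting: timeout '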
# FIXME: see if we can unify in module_utils with similar function used by argspec
def ensure_type(value, value_type, origin=None, origin_ftype=None):
""" return a configuration variable with casting
:arg value: The value to ensure correct typing of
:kwarg value_type: The type of the value. This can be any of the following strings:
:boolean: sets the value to a True or False value
:bool: Same as 'boolean'
        :integer: Sets the value to an integer or raises a ValueError
        :int: Same as 'integer'
        :float: Sets the value to a float or raises a ValueError
:list: Treats the value as a comma separated list. Split the value
and return it as a python list.
:none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
:tmppath: Create a unique temporary directory inside of the directory
specified by value and return its path.
:temppath: Same as 'tmppath'
:tmp: Same as 'tmppath'
        :pathlist: Treat the value as a comma separated list of paths. Split
            the value and then expand each part for environment variables and
            tildes.
        :pathspec: Treat the value as a typical PATH string (colon separated on
            POSIX). Split the value and expand any environment variables and
            tildes in each part.
:str: Sets the value to string types.
:string: Same as 'str'
"""
errmsg = ''
basedir = None
if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)):
basedir = origin
if value_type:
value_type = value_type.lower()
if value is not None:
if value_type in ('boolean', 'bool'):
value = boolean(value, strict=False)
elif value_type in ('integer', 'int'):
if not isinstance(value, int):
try:
if (decimal_value := decimal.Decimal(value)) == (int_part := int(decimal_value)):
value = int_part
else:
errmsg = 'int'
except decimal.DecimalException as e:
raise ValueError from e
elif value_type == 'float':
if not isinstance(value, float):
value = float(value)
elif value_type == 'list':
if isinstance(value, string_types):
value = [unquote(x.strip()) for x in value.split(',')]
elif not isinstance(value, Sequence):
errmsg = 'list'
elif value_type == 'none':
if value == "None":
value = None
if value is not None:
errmsg = 'None'
elif value_type == 'path':
if isinstance(value, string_types):
value = resolve_path(value, basedir=basedir)
else:
errmsg = 'path'
elif value_type in ('tmp', 'temppath', 'tmppath'):
if isinstance(value, string_types):
value = resolve_path(value, basedir=basedir)
if not os.path.exists(value):
makedirs_safe(value, 0o700)
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
atexit.register(cleanup_tmp_file, value, warn=True)
else:
errmsg = 'temppath'
elif value_type == 'pathspec':
if isinstance(value, string_types):
value = value.split(os.pathsep)
if isinstance(value, Sequence):
value = [resolve_path(x, basedir=basedir) for x in value]
else:
errmsg = 'pathspec'
elif value_type == 'pathlist':
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
if isinstance(value, Sequence):
value = [resolve_path(x, basedir=basedir) for x in value]
else:
errmsg = 'pathlist'
elif value_type in ('dict', 'dictionary'):
if not isinstance(value, Mapping):
errmsg = 'dictionary'
elif value_type in ('str', 'string'):
if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode, bool, int, float, complex)):
value = to_text(value, errors='surrogate_or_strict')
if origin_ftype and origin_ftype == 'ini':
value = unquote(value)
else:
errmsg = 'string'
# defaults to string type
elif isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
value = to_text(value, errors='surrogate_or_strict')
if origin_ftype and origin_ftype == 'ini':
value = unquote(value)
if errmsg:
raise ValueError(f'Invalid type provided for "{errmsg}": {value!r}')
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
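# Illustrative examples (derived from the logic above, not original comments):
#   ensure_type('yes', 'bool')    -> True
#   ensure_type('42', 'int')      -> 42
#   ensure_type('a, b', 'list')   -> ['a', 'b']
#   ensure_type({'k': 1}, 'str')  -> raises ValueError (mapping is not string-castable)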
# FIXME: see if this can live in utils/path
def resolve_path(path, basedir=None):
""" resolve relative or 'variable' paths """
if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}}
path = path.replace('{{CWD}}', os.getcwd())
return unfrackpath(path, follow=False, basedir=basedir)
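# Illustrative example: resolve_path('{{CWD}}/ansible.cfg') substitutes the
# current working directory for '{{CWD}}' before unfrackpath normalizes it.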
# FIXME: generic file type?
def get_config_type(cfile):
ftype = None
if cfile is not None:
ext = os.path.splitext(cfile)[-1]
if ext in ('.ini', '.cfg'):
ftype = 'ini'
elif ext in ('.yaml', '.yml'):
ftype = 'yaml'
else:
raise AnsibleOptionsError("Unsupported configuration file extension for %s: %s" % (cfile, to_native(ext)))
return ftype
# FIXME: can move to module_utils for use for ini plugins also?
def get_ini_config_value(p, entry):
""" returns the value of last ini entry found """
value = None
if p is not None:
try:
value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True)
except Exception: # FIXME: actually report issues here
pass
return value
def find_ini_config_file(warnings=None):
""" Load INI Config File order(first found is used): ENV, CWD, HOME, /etc/ansible """
# FIXME: eventually deprecate ini configs
if warnings is None:
# Note: In this case, warnings does nothing
warnings = set()
potential_paths = []
# A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later
# We can't use None because we could set path to None.
# Environment setting
path_from_env = os.getenv("ANSIBLE_CONFIG", Sentinel)
if path_from_env is not Sentinel:
path_from_env = unfrackpath(path_from_env, follow=False)
if os.path.isdir(to_bytes(path_from_env)):
path_from_env = os.path.join(path_from_env, "ansible.cfg")
potential_paths.append(path_from_env)
# Current working directory
warn_cmd_public = False
try:
cwd = os.getcwd()
perms = os.stat(cwd)
cwd_cfg = os.path.join(cwd, "ansible.cfg")
if perms.st_mode & stat.S_IWOTH:
# Working directory is world writable so we'll skip it.
# Still have to look for a file here, though, so that we know if we have to warn
if os.path.exists(cwd_cfg):
warn_cmd_public = True
else:
potential_paths.append(to_text(cwd_cfg, errors='surrogate_or_strict'))
except OSError:
# If we can't access cwd, we'll simply skip it as a possible config source
pass
# Per user location
potential_paths.append(unfrackpath("~/.ansible.cfg", follow=False))
# System location
potential_paths.append("/etc/ansible/ansible.cfg")
for path in potential_paths:
b_path = to_bytes(path)
if os.path.exists(b_path) and os.access(b_path, os.R_OK):
break
else:
path = None
# Emit a warning if all the following are true:
# * We did not use a config from ANSIBLE_CONFIG
# * There's an ansible.cfg in the current working directory that we skipped
if path_from_env != path and warn_cmd_public:
warnings.add(u"Ansible is being run in a world writable directory (%s),"
u" ignoring it as an ansible.cfg source."
u" For more information see"
u" https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir"
% to_text(cwd))
return path
def _add_base_defs_deprecations(base_defs):
"""Add deprecation source 'ansible.builtin' to deprecations in base.yml"""
def process(entry):
if 'deprecated' in entry:
entry['deprecated']['collection_name'] = 'ansible.builtin'
for dummy, data in base_defs.items():
process(data)
for section in ('ini', 'env', 'vars'):
if section in data:
for entry in data[section]:
process(entry)
class ConfigManager(object):
DEPRECATED = [] # type: list[tuple[str, dict[str, str]]]
WARNINGS = set() # type: set[str]
def __init__(self, conf_file=None, defs_file=None):
self._base_defs = {}
self._plugins = {}
self._parsers = {}
self._config_file = conf_file
self._base_defs = self._read_config_yaml_file(defs_file or ('%s/base.yml' % os.path.dirname(__file__)))
_add_base_defs_deprecations(self._base_defs)
if self._config_file is None:
# set config using ini
self._config_file = find_ini_config_file(self.WARNINGS)
# consume configuration
if self._config_file:
# initialize parser and read config
self._parse_config_file()
# ensure we always have config def entry
self._base_defs['CONFIG_FILE'] = {'default': None, 'type': 'path'}
def load_galaxy_server_defs(self, server_list):
def server_config_def(section, key, required, option_type):
config_def = {
'description': 'The %s of the %s Galaxy server' % (key, section),
'ini': [
{
'section': 'galaxy_server.%s' % section,
'key': key,
}
],
'env': [
{'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
],
'required': required,
'type': option_type,
}
if key in GALAXY_SERVER_ADDITIONAL:
config_def.update(GALAXY_SERVER_ADDITIONAL[key])
# ensure we always have a default timeout
if key == 'timeout' and 'default' not in config_def:
config_def['default'] = self.get_config_value('GALAXY_SERVER_TIMEOUT')
return config_def
if server_list:
for server_key in server_list:
if not server_key:
                    # filter out empty strings and other non-truthy values, since an
                    # empty server list env var is equal to ['']
continue
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
defs = dict((k, server_config_def(server_key, k, req, value_type)) for k, req, value_type in GALAXY_SERVER_DEF)
self.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
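    # Illustrative example (not in the original source): a server named 'release'
    # in GALAXY_SERVER_LIST is resolved from the ini section [galaxy_server.release]
    # (keys: url, username, password, token, ...) or from env vars such as
    # ANSIBLE_GALAXY_SERVER_RELEASE_URL.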
def template_default(self, value, variables):
if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None:
# template default values if possible
# NOTE: cannot use is_template due to circular dep
try:
t = NativeEnvironment().from_string(value)
value = t.render(variables)
except Exception:
pass # not templatable
return value
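    # Illustrative example: template_default('{{ 2 + 2 }}', {}) renders to the
    # native value 4; values not fully wrapped in '{{ ... }}' are returned unchanged.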
def _read_config_yaml_file(self, yml_file):
# TODO: handle relative paths as relative to the directory containing the current playbook instead of CWD
# Currently this is only used with absolute paths to the `ansible/config` directory
yml_file = to_bytes(yml_file)
if os.path.exists(yml_file):
with open(yml_file, 'rb') as config_def:
return yaml_load(config_def) or {}
raise AnsibleError(
"Missing base YAML definition file (bad install?): %s" % to_native(yml_file))
def _parse_config_file(self, cfile=None):
""" return flat configuration settings from file(s) """
# TODO: take list of files with merge/nomerge
if cfile is None:
cfile = self._config_file
ftype = get_config_type(cfile)
if cfile is not None:
if ftype == 'ini':
self._parsers[cfile] = configparser.ConfigParser(inline_comment_prefixes=(';',))
with open(to_bytes(cfile), 'rb') as f:
try:
cfg_text = to_text(f.read(), errors='surrogate_or_strict')
except UnicodeError as e:
raise AnsibleOptionsError("Error reading config file(%s) because the config file was not utf8 encoded: %s" % (cfile, to_native(e)))
try:
self._parsers[cfile].read_string(cfg_text)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file (%s): %s" % (cfile, to_native(e)))
# FIXME: this should eventually handle yaml config files
# elif ftype == 'yaml':
# with open(cfile, 'rb') as config_stream:
# self._parsers[cfile] = yaml_load(config_stream)
else:
raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype))
def _find_yaml_config_files(self):
""" Load YAML Config Files in order, check merge flags, keep origin of settings"""
pass
def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None):
options = {}
defs = self.get_configuration_definitions(plugin_type=plugin_type, name=name)
for option in defs:
options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct)
return options
def get_plugin_vars(self, plugin_type, name):
pvars = []
for pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).values():
if 'vars' in pdef and pdef['vars']:
for var_entry in pdef['vars']:
pvars.append(var_entry['name'])
return pvars
def get_plugin_options_from_var(self, plugin_type, name, variable):
options = []
for option_name, pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).items():
if 'vars' in pdef and pdef['vars']:
for var_entry in pdef['vars']:
if variable == var_entry['name']:
options.append(option_name)
return options
def get_configuration_definition(self, name, plugin_type=None, plugin_name=None):
ret = {}
if plugin_type is None:
ret = self._base_defs.get(name, None)
elif plugin_name is None:
ret = self._plugins.get(plugin_type, {}).get(name, None)
else:
ret = self._plugins.get(plugin_type, {}).get(plugin_name, {}).get(name, None)
return ret
def has_configuration_definition(self, plugin_type, name):
has = False
if plugin_type in self._plugins:
has = (name in self._plugins[plugin_type])
return has
def get_configuration_definitions(self, plugin_type=None, name=None, ignore_private=False):
""" just list the possible settings, either base or for specific plugins or plugin """
ret = {}
if plugin_type is None:
ret = self._base_defs
elif name is None:
ret = self._plugins.get(plugin_type, {})
else:
ret = self._plugins.get(plugin_type, {}).get(name, {})
if ignore_private:
for cdef in list(ret.keys()):
if cdef.startswith('_'):
del ret[cdef]
return ret
def _loop_entries(self, container, entry_list):
""" repeat code for value entry assignment """
value = None
origin = None
for entry in entry_list:
name = entry.get('name')
try:
temp_value = container.get(name, None)
except UnicodeEncodeError:
self.WARNINGS.add(u'value for config entry {0} contains invalid characters, ignoring...'.format(to_text(name)))
continue
if temp_value is not None: # only set if entry is defined in container
# inline vault variables should be converted to a text string
if isinstance(temp_value, AnsibleVaultEncryptedUnicode):
temp_value = to_text(temp_value, errors='surrogate_or_strict')
value = temp_value
origin = name
# deal with deprecation of setting source, if used
if 'deprecated' in entry:
self.DEPRECATED.append((entry['name'], entry['deprecated']))
return value, origin
def get_config_value(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
""" wrapper """
try:
value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name,
keys=keys, variables=variables, direct=direct)
except AnsibleError:
raise
except Exception as e:
raise AnsibleError("Unhandled exception when retrieving %s:\n%s" % (config, to_native(e)), orig_exc=e)
return value

    def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
        """ Given a config key figure out the actual value and report on the origin of the settings """

        if cfile is None:
            # use default config
            cfile = self._config_file

        if config == 'CONFIG_FILE':
            return cfile, ''

        # Note: sources that are lists listed in low to high precedence (last one wins)
        value = None
        origin = None
        origin_ftype = None

        defs = self.get_configuration_definitions(plugin_type=plugin_type, name=plugin_name)
        if config in defs:

            aliases = defs[config].get('aliases', [])

            # direct setting via plugin arguments, can set to None so we bypass rest of processing/defaults
            if direct:
                if config in direct:
                    value = direct[config]
                    origin = 'Direct'
                else:
                    direct_aliases = [direct[alias] for alias in aliases if alias in direct]
                    if direct_aliases:
                        value = direct_aliases[0]
                        origin = 'Direct'

            if value is None and variables and defs[config].get('vars'):
                # Use 'variable overrides' if present, highest precedence, but only present when querying running play
                value, origin = self._loop_entries(variables, defs[config]['vars'])
                origin = 'var: %s' % origin

            # use playbook keywords if you have em
            if value is None and defs[config].get('keyword') and keys:
                value, origin = self._loop_entries(keys, defs[config]['keyword'])
                origin = 'keyword: %s' % origin

            # automap to keywords
            # TODO: deprecate these in favor of explicit keyword above
            if value is None and keys:
                if config in keys:
                    value = keys[config]
                    keyword = config

                elif aliases:
                    for alias in aliases:
                        if alias in keys:
                            value = keys[alias]
                            keyword = alias
                            break

                if value is not None:
                    origin = 'keyword: %s' % keyword

            if value is None and 'cli' in defs[config]:
                # avoid circular import .. until valid
                from ansible import context
                value, origin = self._loop_entries(context.CLIARGS, defs[config]['cli'])
                origin = 'cli: %s' % origin

            # env vars are next precedence
            if value is None and defs[config].get('env'):
                value, origin = self._loop_entries(os.environ, defs[config]['env'])
                origin = 'env: %s' % origin

            # try config file entries next, if we have one
            if self._parsers.get(cfile, None) is None:
                self._parse_config_file(cfile)

            # attempt to read from config file
            if value is None and cfile is not None:
                ftype = get_config_type(cfile)
                if ftype and defs[config].get(ftype):
                    try:
                        for entry in defs[config][ftype]:
                            # load from config
                            if ftype == 'ini':
                                temp_value = get_ini_config_value(self._parsers[cfile], entry)
                            elif ftype == 'yaml':
                                raise AnsibleError('YAML configuration type has not been implemented yet')
                            else:
                                raise AnsibleError('Invalid configuration file type: %s' % ftype)

                            if temp_value is not None:
                                # set value and origin
                                value = temp_value
                                origin = cfile
                                origin_ftype = ftype
                                if 'deprecated' in entry:
                                    if ftype == 'ini':
                                        self.DEPRECATED.append(('[%s]%s' % (entry['section'], entry['key']), entry['deprecated']))
                                    else:
                                        raise AnsibleError('Unimplemented file type: %s' % ftype)

                    except Exception as e:
                        sys.stderr.write("Error while loading config %s: %s" % (cfile, to_native(e)))

            # set default if we got here w/o a value
            if value is None:
                if defs[config].get('required', False):
                    if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
                        raise AnsibleRequiredOptionError("No setting was provided for required configuration %s" %
                                                         to_native(_get_entry(plugin_type, plugin_name, config)))
                else:
                    origin = 'default'
                    value = self.template_default(defs[config].get('default'), variables)
            try:
                # ensure correct type, can raise exceptions on mismatched types
                value = ensure_type(value, defs[config].get('type'), origin=origin, origin_ftype=origin_ftype)
            except ValueError as e:
                if origin.startswith('env:') and value == '':
                    # this is empty env var for non string so we can set to default
                    origin = 'default'
                    value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin, origin_ftype=origin_ftype)
                else:
                    raise AnsibleOptionsError('Invalid type for configuration option %s (from %s): %s' %
                                              (to_native(_get_entry(plugin_type, plugin_name, config)).strip(), origin, to_native(e)))

            # deal with restricted values
            if value is not None and 'choices' in defs[config] and defs[config]['choices'] is not None:
                invalid_choices = True  # assume the worst!
                if defs[config].get('type') == 'list':
                    # for a list type, compare all values in type are allowed
                    invalid_choices = not all(choice in defs[config]['choices'] for choice in value)
                else:
                    # these should be only the simple data types (string, int, bool, float, etc) .. ignore dicts for now
                    invalid_choices = value not in defs[config]['choices']

                if invalid_choices:

                    if isinstance(defs[config]['choices'], Mapping):
                        valid = ', '.join([to_text(k) for k in defs[config]['choices'].keys()])
                    elif isinstance(defs[config]['choices'], string_types):
                        valid = defs[config]['choices']
                    elif isinstance(defs[config]['choices'], Sequence):
                        valid = ', '.join([to_text(c) for c in defs[config]['choices']])
                    else:
                        valid = defs[config]['choices']

                    raise AnsibleOptionsError('Invalid value "%s" for configuration option "%s", valid values are: %s' %
                                              (value, to_native(_get_entry(plugin_type, plugin_name, config)), valid))

            # deal with deprecation of the setting
            if 'deprecated' in defs[config] and origin != 'default':
                self.DEPRECATED.append((config, defs[config].get('deprecated')))
        else:
            raise AnsibleError('Requested entry (%s) was not defined in configuration.' % to_native(_get_entry(plugin_type, plugin_name, config)))

        return value, origin
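
    # Illustrative precedence sketch, not part of the original source: the checks
    # above resolve sources from highest to lowest precedence as
    # direct > vars > keyword > cli > env > ini file > default, since the first
    # non-None value short-circuits the remaining lookups. For example:
    #
    #   value, origin = self.get_config_value_and_origin('DEFAULT_TIMEOUT')
    #   # with ANSIBLE_TIMEOUT=30 exported and no higher-precedence source set,
    #   # this returns (30, 'env: ANSIBLE_TIMEOUT')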

    def initialize_plugin_configuration_definitions(self, plugin_type, name, defs):

        if plugin_type not in self._plugins:
            self._plugins[plugin_type] = {}

        self._plugins[plugin_type][name] = defs
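
    # Illustrative usage, not part of the original source: plugin loading code
    # registers per-plugin option definitions here so that later
    # get_config_value_and_origin() calls can resolve them. A hypothetical
    # definitions dict for a shell plugin might look like:
    #
    #   defs = {'remote_tmp': {'default': '~/.ansible/tmp', 'env': [{'name': 'ANSIBLE_REMOTE_TMP'}]}}
    #   self.initialize_plugin_configuration_definitions('shell', 'sh', defs)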

    @staticmethod
    def get_deprecated_msg_from_config(dep_docs, include_removal=False, collection_name=None):

        removal = ''
        if include_removal:
            if 'removed_at_date' in dep_docs:
                removal = f"Will be removed in a release after {dep_docs['removed_at_date']}\n\t"
            elif collection_name:
                removal = f"Will be removed in: {collection_name} {dep_docs['removed_in']}\n\t"
            else:
                removal = f"Will be removed in: Ansible {dep_docs['removed_in']}\n\t"

        # TODO: choose to deprecate either singular or plural
        alt = dep_docs.get('alternatives', dep_docs.get('alternative', 'none'))

        return f"Reason: {dep_docs['why']}\n\t{removal}Alternatives: {alt}"

| 29,319 | Python | .py | 569 | 38.622144 | 155 | 0.577758 | ansible/ansible | 62,258 | 23,791 | 861 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |